diff --git a/commons/api_path.go b/commons/api_path.go
index 7d359ba..9a496b7 100644
--- a/commons/api_path.go
+++ b/commons/api_path.go
@@ -38,6 +38,26 @@ var ApiPath = struct {
FindSubnetByName func(vpcId string) string
FindSubnet func(vpcId string, subnetId string) string
ListSubnets func(vpcId string) string
+
+ Subnet func(vpcId string) string
+ EdgeGatewayList func(vpcId string) string
+
+ DatabaseGet func(databaseId string) string
+ DatabaseCreate func() string
+ DatabaseDelete func(databaseId string) string
+ DatabaseStop func() string
+ DatabaseStart func() string
+
+ DedicatedFKEList func(vpcId string, page, pageSize int) string
+ DedicatedFKEGet func(vpcId string, clusterId string) string
+ DedicatedFKEUpgradeVersion func(vpcId string, clusterId string) string
+ DedicatedFKEManagement func(vpcId string, clusterId string) string
+
+ ManagedFKEList func(vpcId string, page int, pageSize int, infraType string) string
+ ManagedFKEGet func(vpcId string, platform string, clusterId string) string
+ ManagedFKEDelete func(vpcId string, platform string, clusterName string) string
+ ManagedFKECreate func(vpcId string, platform string) string
+ GetFKEOSVersion func(vpcId string, platform string) string
}{
SSH: "/v1/user/sshs",
Storage: func(vpcId string) string {
@@ -139,4 +159,64 @@ var ApiPath = struct {
ListSubnets: func(vpcId string) string {
return fmt.Sprintf("/v2/vpc/%s/networks", vpcId)
},
+
+ Subnet: func(vpcId string) string { return fmt.Sprintf("/v1/vmware/vpc/%s/network/subnets", vpcId) },
+
+ EdgeGatewayList: func(vpcId string) string {
+ return fmt.Sprintf("/v1/vmware/vpc/%s/edge_gateway/list", vpcId)
+ },
+
+ DatabaseGet: func(databaseId string) string {
+ return fmt.Sprintf("/v1/xplat/database/management/cluster/detail/%s", databaseId)
+ },
+ DatabaseCreate: func() string {
+ return "/v1/xplat/database/provision/create"
+ },
+ DatabaseDelete: func(databaseId string) string {
+ return fmt.Sprintf("/v1/xplat/database/provision/delete/%s", databaseId)
+ },
+ DatabaseStop: func() string {
+ return "/v1/xplat/database/management/cluster/stop"
+ },
+ DatabaseStart: func() string {
+ return "/v1/xplat/database/management/cluster/start"
+ },
+
+ DedicatedFKEList: func(vpcId string, page, pageSize int) string {
+ return fmt.Sprintf("/v1/xplat/fke/vpc/%s/kubernetes?page=%d&page_size=%d", vpcId, page, pageSize)
+ },
+ DedicatedFKEGet: func(vpcId string, clusterId string) string {
+ return fmt.Sprintf("/v1/xplat/fke/vpc/%s/cluster/%s?page=1&page_size=25", vpcId, clusterId)
+ },
+ DedicatedFKEUpgradeVersion: func(vpcId string, clusterId string) string {
+ return fmt.Sprintf("/v1/xplat/fke/vpc/%s/cluster/%s/upgrade-version", vpcId, clusterId)
+ },
+ DedicatedFKEManagement: func(vpcId string, clusterId string) string {
+ return fmt.Sprintf("/v1/xplat/fke/vpc/%s/kubernetes/%s/management", vpcId, clusterId)
+ },
+
+ ManagedFKEList: func(vpcId string, page int, pageSize int, infraType string) string {
+ return fmt.Sprintf("/v1/xplat/fke/vpc/%s/m-fke/%s/get-shoot-cluster/shoots?page=%d&page_size=%d", vpcId, infraType, page, pageSize)
+ },
+ ManagedFKEDelete: func(vpcId string, platform string, clusterName string) string {
+ return fmt.Sprintf(
+ "/v1/xplat/fke/vpc/%s/m-fke/%s/delete-shoot-cluster/shoots/%s",
+ vpcId, platform, clusterName,
+ )
+ },
+ ManagedFKECreate: func(vpcId string, platform string) string {
+ return fmt.Sprintf(
+ "/v1/xplat/fke/vpc/%s/m-fke/%s/create-cluster",
+ vpcId, platform,
+ )
+ },
+ ManagedFKEGet: func(vpcId string, platform string, clusterId string) string {
+ return fmt.Sprintf(
+ "/v1/xplat/fke/vpc/%s/m-fke/%s/get-shoot-specific/shoots/%s",
+ vpcId, platform, clusterId,
+ )
+ },
+ GetFKEOSVersion: func(vpcId string, platform string) string {
+ return fmt.Sprintf("/v1/xplat/fke/vpc/%s/m-fke/%s/get_k8s_versions", vpcId, platform)
+ },
}
diff --git a/commons/client.go b/commons/client.go
index 45293be..bbf9406 100644
--- a/commons/client.go
+++ b/commons/client.go
@@ -83,12 +83,12 @@ func NewClientWithURL(apiKey, apiUrl, region string, tenantName string) (*Client
return client, nil
}
-func (c *Client) prepareClientURL(requestURL string) *url.URL {
+func (c *Client) PrepareClientURL(requestURL string) *url.URL {
u, _ := url.Parse(c.BaseURL.String() + requestURL)
return u
}
-func (c *Client) sendRequest(req *http.Request) ([]byte, error) {
+func (c *Client) SendRequest(req *http.Request) ([]byte, error) {
req.Header.Set("Accept", "application/json")
req.Header.Set("User-Agent", c.UserAgent)
req.Header.Set("Content-Type", "application/json")
@@ -122,18 +122,18 @@ func (c *Client) sendRequest(req *http.Request) ([]byte, error) {
// SendGetRequest sends a correctly authenticated get request to the API server
func (c *Client) SendGetRequest(requestURL string) ([]byte, error) {
- u := c.prepareClientURL(requestURL)
+ u := c.PrepareClientURL(requestURL)
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return nil, err
}
- return c.sendRequest(req)
+ return c.SendRequest(req)
}
// SendPostRequest sends a correctly authenticated post request to the API server
func (c *Client) SendPostRequest(requestURL string, params interface{}) ([]byte, error) {
- u := c.prepareClientURL(requestURL)
+ u := c.PrepareClientURL(requestURL)
// we create a new buffer and encode everything to json to send it in the request
jsonValue, _ := json.Marshal(params)
@@ -142,12 +142,12 @@ func (c *Client) SendPostRequest(requestURL string, params interface{}) ([]byte,
if err != nil {
return nil, err
}
- return c.sendRequest(req)
+ return c.SendRequest(req)
}
// SendPutRequest sends a correctly authenticated put request to the API server
func (c *Client) SendPutRequest(requestURL string, params interface{}) ([]byte, error) {
- u := c.prepareClientURL(requestURL)
+ u := c.PrepareClientURL(requestURL)
// we create a new buffer and encode everything to json to send it in the request
jsonValue, _ := json.Marshal(params)
@@ -156,23 +156,23 @@ func (c *Client) SendPutRequest(requestURL string, params interface{}) ([]byte,
if err != nil {
return nil, err
}
- return c.sendRequest(req)
+ return c.SendRequest(req)
}
// SendDeleteRequest sends a correctly authenticated delete request to the API server
func (c *Client) SendDeleteRequest(requestURL string) ([]byte, error) {
- u := c.prepareClientURL(requestURL)
+ u := c.PrepareClientURL(requestURL)
req, err := http.NewRequest("DELETE", u.String(), nil)
if err != nil {
return nil, err
}
- return c.sendRequest(req)
+ return c.SendRequest(req)
}
// SendDeleteRequestWithBody sends a correctly authenticated delete request to the API server
func (c *Client) SendDeleteRequestWithBody(requestURL string, params interface{}) ([]byte, error) {
- u := c.prepareClientURL(requestURL)
+ u := c.PrepareClientURL(requestURL)
// we create a new buffer and encode everything to json to send it in the request
jsonValue, _ := json.Marshal(params)
@@ -182,7 +182,7 @@ func (c *Client) SendDeleteRequestWithBody(requestURL string, params interface{}
return nil, err
}
- return c.sendRequest(req)
+ return c.SendRequest(req)
}
// SetUserAgent sets the user agent for the client
diff --git a/docs/data-sources/dedicated_kubernetes_engine_v1.md b/docs/data-sources/dedicated_kubernetes_engine_v1.md
new file mode 100644
index 0000000..d33355d
--- /dev/null
+++ b/docs/data-sources/dedicated_kubernetes_engine_v1.md
@@ -0,0 +1,48 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "fptcloud_dedicated_kubernetes_engine_v1 Data Source - terraform-provider-fptcloud"
+subcategory: ""
+description: |-
+ Retrieves information about dedicated FKE clusters
+---
+
+# fptcloud_dedicated_kubernetes_engine_v1 (Data Source)
+
+Retrieves information about dedicated FKE clusters
+
+
+
+
+## Schema
+
+### Required
+
+- `cluster_id` (String) Cluster ID, as shown on the dashboard, usually has a length of 8 characters
+- `vpc_id` (String) VPC ID
+
+### Read-Only
+
+- `cluster_name` (String) Cluster name
+- `edge_id` (String) Edge ID
+- `id` (String) UUID of the cluster
+- `ip_private_firewall` (String) IP private firewall
+- `ip_public_firewall` (String) IP public firewall
+- `k8s_version` (String) Kubernetes version
+- `lb_size` (String) Load balancer size
+- `master_count` (Number) Number of master node
+- `master_disk_size` (Number) Master node disk capacity in GB
+- `master_type` (String) ID of the flavor of master node
+- `max_pod_per_node` (Number) Max pods per node
+- `network_id` (String) Network UUID
+- `network_node_prefix` (Number) Network node prefix
+- `nfs_disk_size` (Number) NFS disk size
+- `nfs_status` (String) NFS status
+- `node_dns` (String) DNS server of nodes
+- `pod_network` (String) Pod network in CIDR notation
+- `region_id` (String) Region ID
+- `scale_max` (Number) Maximum number of nodes for autoscaling
+- `scale_min` (Number) Minimum number of nodes for autoscaling
+- `service_network` (String) Service network in CIDR notation
+- `storage_policy` (String) Storage policy
+- `worker_disk_size` (Number) Worker node disk capacity in GB
+- `worker_type` (String) ID of the flavor of worker node
diff --git a/docs/data-sources/edge_gateway.md b/docs/data-sources/edge_gateway.md
new file mode 100644
index 0000000..75a13dd
--- /dev/null
+++ b/docs/data-sources/edge_gateway.md
@@ -0,0 +1,26 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "fptcloud_edge_gateway Data Source - terraform-provider-fptcloud"
+subcategory: ""
+description: |-
+ Retrieves information about FPT Cloud edge gateway
+---
+
+# fptcloud_edge_gateway (Data Source)
+
+Retrieves information about FPT Cloud edge gateway
+
+
+
+
+## Schema
+
+### Required
+
+- `name` (String) Name of the compute edge_gateway
+- `vpc_id` (String) VPC id
+
+### Read-Only
+
+- `edge_gateway_id` (String) Edge gateway id
+- `id` (String) Identifier of the edge_gateway
diff --git a/docs/data-sources/managed_kubernetes_engine_v1.md b/docs/data-sources/managed_kubernetes_engine_v1.md
new file mode 100644
index 0000000..db833a6
--- /dev/null
+++ b/docs/data-sources/managed_kubernetes_engine_v1.md
@@ -0,0 +1,53 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "fptcloud_managed_kubernetes_engine_v1 Data Source - terraform-provider-fptcloud"
+subcategory: ""
+description: |-
+ Retrieves information about managed FKE clusters
+---
+
+# fptcloud_managed_kubernetes_engine_v1 (Data Source)
+
+Retrieves information about managed FKE clusters
+
+
+
+
+## Schema
+
+### Required
+
+- `cluster_name` (String)
+- `vpc_id` (String)
+
+### Read-Only
+
+- `id` (String) The ID of this resource.
+- `k8s_max_pod` (Number)
+- `k8s_version` (String)
+- `load_balancer_type` (String)
+- `network_node_prefix` (Number)
+- `pod_network` (String)
+- `pod_prefix` (String)
+- `pools` (Block List) (see [below for nested schema](#nestedblock--pools))
+- `purpose` (String)
+- `range_ip_lb_end` (String)
+- `range_ip_lb_start` (String)
+- `service_network` (String)
+- `service_prefix` (String)
+
+
+### Nested Schema for `pools`
+
+Read-Only:
+
+- `auto_scale` (Boolean)
+- `is_enable_auto_repair` (Boolean)
+- `name` (String)
+- `network_id` (String)
+- `network_name` (String)
+- `scale_max` (Number)
+- `scale_min` (Number)
+- `storage_profile` (String)
+- `worker_disk_size` (Number)
+- `worker_type` (String)
diff --git a/docs/data-sources/subnet.md b/docs/data-sources/subnet.md
index 9f9441b..36358c7 100644
--- a/docs/data-sources/subnet.md
+++ b/docs/data-sources/subnet.md
@@ -81,5 +81,6 @@ Read-Only:
- `edge_gateway` (Map of String)
- `gateway` (String)
- `id` (String)
- `name` (String)
+- `network_id` (String)
- `network_name` (String)
diff --git a/docs/resources/database.md b/docs/resources/database.md
new file mode 100644
index 0000000..6e9f7fd
--- /dev/null
+++ b/docs/resources/database.md
@@ -0,0 +1,49 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "fptcloud_database Resource - terraform-provider-fptcloud"
+subcategory: ""
+description: |-
+ Provides a Fpt database cluster which can be used to store data.
+---
+
+# fptcloud_database (Resource)
+
+Provides a Fpt database cluster which can be used to store data.
+
+
+
+
+## Schema
+
+### Required
+
+- `admin_password` (String) The admin password of the database cluster.
+- `cluster_name` (String) The name of the database cluster.
+- `data_disk_size` (Number) The size of the data disk in each node of the database cluster.
+- `database_name` (String) The name of the database in the database cluster.
+- `domain_name` (String) The domain name of the database cluster.
+- `edge_id` (String) The edge Id of the database cluster.
+- `edition` (String) The edition of the database cluster.
+- `flavor` (String) The flavor of the database cluster.
+- `is_cluster` (String) The cluster status of the database cluster.
+- `is_ops` (String) Whether the database is OpenStack or VMware
+- `is_public` (String) Whether the database is public or not.
+- `master_count` (Number) The number of master nodes in the database cluster.
+- `network_id` (String) The network Id of the database cluster.
+- `node_core` (Number) The number of cores in each node of the database cluster.
+- `node_cpu` (Number) The number of CPUs in each node of the database cluster.
+- `node_ram` (Number) The amount of RAM in each node of the database cluster.
+- `number_of_node` (Number) The number of nodes in the database cluster.
+- `storage_profile` (String) The storage profile of the database cluster.
+- `type_config` (String) The type of configuration of the database cluster (short-config or custom-config).
+- `type_db` (String) The type of database of the database cluster
+- `vdc_name` (String) The VDC name of the database cluster.
+- `version` (String) The version of the database cluster.
+- `vhost_name` (String) The name of the RabbitMQ database.
+- `vm_network` (String) The VM network of the database cluster.
+- `vpc_id` (String) The VPC Id of the database cluster.
+- `worker_count` (Number) The number of worker nodes in the database cluster.
+
+### Read-Only
+
+- `id` (String) The Id of the database cluster.
diff --git a/docs/resources/database_status.md b/docs/resources/database_status.md
new file mode 100644
index 0000000..e79ea28
--- /dev/null
+++ b/docs/resources/database_status.md
@@ -0,0 +1,21 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "fptcloud_database_status Resource - terraform-provider-fptcloud"
+subcategory: ""
+description: |-
+ Provides a Fpt database cluster status to temporarily stop or start a database.
+---
+
+# fptcloud_database_status (Resource)
+
+Provides a Fpt database cluster status to temporarily stop or start a database.
+
+
+
+
+## Schema
+
+### Required
+
+- `id` (String) The Id of the database cluster.
+- `status` (String) The status of the database cluster, must be 'running' or 'stopped'.
diff --git a/docs/resources/dedicated_kubernetes_engine_v1.md b/docs/resources/dedicated_kubernetes_engine_v1.md
new file mode 100644
index 0000000..8f35710
--- /dev/null
+++ b/docs/resources/dedicated_kubernetes_engine_v1.md
@@ -0,0 +1,48 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "fptcloud_dedicated_kubernetes_engine_v1 Resource - terraform-provider-fptcloud"
+subcategory: ""
+description: |-
+ Manage dedicated FKE clusters.
+---
+
+# fptcloud_dedicated_kubernetes_engine_v1 (Resource)
+
+Manage dedicated FKE clusters.
+
+
+
+
+## Schema
+
+### Required
+
+- `cluster_name` (String) Cluster name
+- `edge_id` (String) Edge ID
+- `ip_private_firewall` (String) IP private firewall
+- `ip_public_firewall` (String) IP public firewall
+- `k8s_version` (String) Kubernetes version
+- `lb_size` (String) Load balancer size
+- `master_count` (Number) Number of master nodes
+- `master_disk_size` (Number) Disk size of master node in GB
+- `master_type` (String) Flavor ID of master node
+- `max_pod_per_node` (Number) Max pods per node
+- `network_id` (String) Network UUID
+- `network_node_prefix` (Number) Network node prefix
+- `nfs_disk_size` (Number) NFS disk size
+- `nfs_status` (String) NFS status
+- `node_dns` (String) DNS server of nodes
+- `pod_network` (String) Pod network in CIDR notation
+- `region_id` (String) Region ID
+- `scale_max` (Number) Maximum number of nodes for autoscaling
+- `scale_min` (Number) Minimum number of nodes for autoscaling
+- `service_network` (String) Service network in CIDR notation
+- `storage_policy` (String) Storage policy
+- `vpc_id` (String) VPC ID
+- `worker_disk_size` (Number) Disk size of worker node in GB
+- `worker_type` (String) Flavor ID of worker node
+
+### Read-Only
+
+- `cluster_id` (String) Cluster slug
+- `id` (String) Cluster UUID
diff --git a/docs/resources/dedicated_kubernetes_engine_v1_state.md b/docs/resources/dedicated_kubernetes_engine_v1_state.md
new file mode 100644
index 0000000..61c2ffb
--- /dev/null
+++ b/docs/resources/dedicated_kubernetes_engine_v1_state.md
@@ -0,0 +1,25 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "fptcloud_dedicated_kubernetes_engine_v1_state Resource - terraform-provider-fptcloud"
+subcategory: ""
+description: |-
+ Manage dedicated FKE cluster state
+---
+
+# fptcloud_dedicated_kubernetes_engine_v1_state (Resource)
+
+Manage dedicated FKE cluster state
+
+
+
+
+## Schema
+
+### Required
+
+- `is_running` (Boolean)
+- `vpc_id` (String)
+
+### Read-Only
+
+- `id` (String) The ID of this resource.
diff --git a/docs/resources/managed_kubernetes_engine_v1.md b/docs/resources/managed_kubernetes_engine_v1.md
new file mode 100644
index 0000000..6ebe75e
--- /dev/null
+++ b/docs/resources/managed_kubernetes_engine_v1.md
@@ -0,0 +1,56 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "fptcloud_managed_kubernetes_engine_v1 Resource - terraform-provider-fptcloud"
+subcategory: ""
+description: |-
+ Manage managed FKE clusters.
+---
+
+# fptcloud_managed_kubernetes_engine_v1 (Resource)
+
+Manage managed FKE clusters.
+
+
+
+
+## Schema
+
+### Required
+
+- `cluster_name` (String) Cluster name
+- `k8s_max_pod` (Number) Max pods per node
+- `k8s_version` (String) Kubernetes version
+- `load_balancer_type` (String) Load balancer type
+- `network_node_prefix` (Number) Network node prefix
+- `pod_network` (String) Pod network (subnet ID)
+- `pod_prefix` (String) Pod network (prefix)
+- `purpose` (String) Cluster purpose
+- `range_ip_lb_end` (String) IP stop for range of LB
+- `range_ip_lb_start` (String) IP start for range of LB
+- `service_network` (String) Service network (subnet ID)
+- `service_prefix` (String) Service prefix (prefix)
+- `vpc_id` (String) VPC ID
+
+### Optional
+
+- `pools` (Block List) (see [below for nested schema](#nestedblock--pools))
+
+### Read-Only
+
+- `id` (String) The ID of this resource.
+
+
+### Nested Schema for `pools`
+
+Required:
+
+- `auto_scale` (Boolean) Whether to enable autoscaling
+- `is_enable_auto_repair` (Boolean) Whether to enable auto-repair
+- `name` (String) Pool name
+- `network_id` (String) Subnet ID
+- `network_name` (String) Subnet name
+- `scale_max` (Number) Maximum number of nodes for autoscaling
+- `scale_min` (Number) Minimum number of nodes for autoscaling
+- `storage_profile` (String) Pool storage profile
+- `worker_disk_size` (Number) Worker disk size
+- `worker_type` (String) Worker flavor ID
diff --git a/examples/data-sources/fptcloud_edge_gateway/data-source.tf b/examples/data-sources/fptcloud_edge_gateway/data-source.tf
new file mode 100644
index 0000000..bc48f6b
--- /dev/null
+++ b/examples/data-sources/fptcloud_edge_gateway/data-source.tf
@@ -0,0 +1,7 @@
+data "fptcloud_edge_gateway" "example" {
+ vpc_id = "your_vpc_id"
+ name = "your_edge_gateway_name"
+}
+output "show_value" {
+ value = data.fptcloud_edge_gateway.example
+}
\ No newline at end of file
diff --git a/examples/resources/fptcloud_dfke/resource.tf b/examples/resources/fptcloud_dfke/resource.tf
new file mode 100644
index 0000000..a6b81a2
--- /dev/null
+++ b/examples/resources/fptcloud_dfke/resource.tf
@@ -0,0 +1,30 @@
+resource "fptcloud_dedicated_kubernetes_engine_v1" "test" {
+ cluster_name = "terraform-test-3"
+ k8s_version = "v1.25.6"
+ # master_type = data.fptcloud_flavor_v1.master.id
+ master_type = "c89d97cd-c9cb-4d70-a0c1-01f190ea1b02"
+ master_count = 1
+ master_disk_size = 76
+ # worker_type = data.fptcloud_flavor_v1.worker.id
+ # worker_type = "5ca3036e-85d6-497f-a37b-076aa8b9adde"
+ worker_type = "c89d97cd-c9cb-4d70-a0c1-01f190ea1b02"
+ worker_disk_size = 103
+ # network_id = data.fptcloud_subnet_v1.xplat_network.id
+ network_id = "urn:vcloud:network:11980234-8474-4e2e-8925-8087177a43ca"
+ lb_size = "standard"
+ pod_network = "10.244.0.0/16"
+ service_network = "172.30.0.0/16"
+ network_node_prefix = 23
+ max_pod_per_node = 110
+ nfs_status = ""
+ nfs_disk_size = 100
+ storage_policy = "Premium-SSD-4000"
+ edge_id = "4d4bfe05-af32-4354-b20a-de814c8b3713"
+ scale_min = 1
+ scale_max = 1
+ node_dns = "1.1.1.1"
+ ip_public_firewall = ""
+ ip_private_firewall = ""
+ vpc_id = "188af427-269b-418a-90bb-0cb27afc6c1e"
+ region_id = "saigon-vn"
+}
\ No newline at end of file
diff --git a/examples/resources/fptcloud_dfke_state/resource.tf b/examples/resources/fptcloud_dfke_state/resource.tf
new file mode 100644
index 0000000..2abb183
--- /dev/null
+++ b/examples/resources/fptcloud_dfke_state/resource.tf
@@ -0,0 +1,5 @@
+resource "fptcloud_dedicated_kubernetes_engine_v1_state" "test_state" {
+ id = "your-cluster-uuid"
+ vpc_id = "your-vpc-id"
+ is_running = true
+}
\ No newline at end of file
diff --git a/fptcloud/database/resource_database.go b/fptcloud/database/resource_database.go
new file mode 100644
index 0000000..479d804
--- /dev/null
+++ b/fptcloud/database/resource_database.go
@@ -0,0 +1,614 @@
+package fptcloud_database
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ diag2 "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "strconv"
+ common "terraform-provider-fptcloud/commons"
+ "time"
+)
+
+var (
+ _ resource.Resource = &resourceDatabase{}
+ _ resource.ResourceWithConfigure = &resourceDatabase{}
+ _ resource.ResourceWithImportState = &resourceDatabase{}
+
+ forceNewPlanModifiersString = []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ }
+
+ forceNewPlanModifiersInt = []planmodifier.Int64{
+ int64planmodifier.RequiresReplace(),
+ }
+)
+
+const (
+ errorCallingApi = "Error calling API"
+)
+
+type resourceDatabase struct {
+ client *common.Client
+}
+
+type databaseResourceModel struct {
+ Id types.String `tfsdk:"id" json:"id,omitempty"`
+ VpcId types.String `tfsdk:"vpc_id" json:"vpc_id"`
+ NetworkId types.String `tfsdk:"network_id" json:"network_id"`
+ VmNetwork types.String `tfsdk:"vm_network" json:"vm_network"`
+ TypeConfig types.String `tfsdk:"type_config" json:"type_config"`
+ TypeDb types.String `tfsdk:"type_db" json:"type_db"`
+ Version types.String `tfsdk:"version" json:"version"`
+ VdcName types.String `tfsdk:"vdc_name" json:"vdc_name"`
+ IsCluster types.String `tfsdk:"is_cluster" json:"is_cluster"`
+ MasterCount types.Int64 `tfsdk:"master_count" json:"master_count"`
+ WorkerCount types.Int64 `tfsdk:"worker_count" json:"worker_count"`
+ NodeCpu types.Int64 `tfsdk:"node_cpu" json:"node_cpu"`
+ NodeCore types.Int64 `tfsdk:"node_core" json:"node_core"`
+ NodeRam types.Int64 `tfsdk:"node_ram" json:"node_ram"`
+ DataDiskSize types.Int64 `tfsdk:"data_disk_size" json:"data_disk_size"`
+ ClusterName types.String `tfsdk:"cluster_name" json:"cluster_name"`
+ DatabaseName types.String `tfsdk:"database_name" json:"database_name"`
+ VhostName types.String `tfsdk:"vhost_name" json:"vhost_name"`
+ IsPublic types.String `tfsdk:"is_public" json:"is_public"`
+ AdminPassword types.String `tfsdk:"admin_password" json:"admin_password"`
+ StorageProfile types.String `tfsdk:"storage_profile" json:"storage_profile"`
+ EdgeId types.String `tfsdk:"edge_id" json:"edge_id"`
+ Edition types.String `tfsdk:"edition" json:"edition"`
+ IsOps types.String `tfsdk:"is_ops" json:"is_ops"`
+ Flavor types.String `tfsdk:"flavor" json:"flavor"`
+ NumberOfNode types.Int64 `tfsdk:"number_of_node" json:"number_of_node"`
+ DomainName types.String `tfsdk:"domain_name" json:"domain_name"`
+}
+
+var timeout = 1800 * time.Second
+
+func NewResourceDatabase() resource.Resource {
+ return &resourceDatabase{}
+}
+
+func (r *resourceDatabase) Metadata(ctx context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) {
+ response.TypeName = request.ProviderTypeName + "_database"
+}
+
+func (r *resourceDatabase) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) {
+ // Get current state of the resource
+ var currentState databaseResourceModel
+ diags := request.Plan.Get(ctx, ¤tState)
+
+ response.Diagnostics.Append(diags...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+
+ f := databaseJson{}
+ r.remap(¤tState, &f)
+ _, err := json.Marshal(f)
+ if err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error marshalling JSON", err.Error()))
+ return
+ }
+
+ // Send API request to create the database
+ client := r.client
+ path := common.ApiPath.DatabaseCreate()
+
+ a, err := client.SendPostRequest(path, f)
+
+ tflog.Info(ctx, "Creating database cluster")
+ tflog.Info(ctx, "Request body: "+fmt.Sprintf("%+v", f))
+ tflog.Info(ctx, "Response: "+string(a))
+
+ if err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic(errorCallingApi, err.Error()))
+ return
+ }
+
+ errorResponse := r.checkForError(a)
+ if errorResponse != nil {
+ response.Diagnostics.Append(errorResponse)
+ return
+ }
+
+ var createResponse databaseCreateResponse
+ if err = json.Unmarshal(a, &createResponse); err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error unmarshalling response", err.Error()))
+ return
+ }
+
+ // Update new state of resource to terraform state
+ if err = r.internalRead(ctx, createResponse.Data.ClusterId, ¤tState); err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error reading database currentState", err.Error()))
+ return
+ }
+ currentState.Flavor = types.StringValue(f.Flavor)
+ currentState.IsOps = types.StringValue(f.IsOps)
+ currentState.IsPublic = types.StringValue(f.IsPublic)
+ currentState.VhostName = types.StringValue(f.VhostName)
+ diags = response.State.Set(ctx, ¤tState)
+ response.Diagnostics.Append(diags...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+}
+
+func (r *resourceDatabase) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) {
+ var state databaseResourceModel
+ diags := request.State.Get(ctx, &state)
+
+ response.Diagnostics.Append(diags...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+
+ err := r.internalRead(ctx, state.Id.ValueString(), &state)
+ if err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic(errorCallingApi, err.Error()))
+ return
+ }
+
+ diags = response.State.Set(ctx, &state)
+ response.Diagnostics.Append(diags...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+}
+
+func (r *resourceDatabase) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) {
+ panic("implement me")
+}
+
+func (r *resourceDatabase) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) {
+ var state databaseResourceModel
+ diags := request.State.Get(ctx, &state)
+
+ response.Diagnostics.Append(diags...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+
+ path := common.ApiPath.DatabaseDelete(state.Id.ValueString())
+ _, err := r.client.SendDeleteRequest(path)
+ if err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic(errorCallingApi, err.Error()))
+ return
+ }
+}
+
+func (r *resourceDatabase) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) {
+ response.Schema = schema.Schema{
+ Description: "Provides a Fpt database cluster which can be used to store data.",
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ Description: "The Id of the database cluster.",
+ },
+ "vpc_id": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The VPC Id of the database cluster.",
+ },
+ "network_id": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The network Id of the database cluster.",
+ },
+ "vm_network": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The VM network of the database cluster.",
+ },
+ "type_config": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The type of configuration of the database cluster (short-config or custom-config).",
+ },
+ "type_db": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The type of database of the database cluster",
+ },
+ "version": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The version of the database cluster.",
+ },
+ "vdc_name": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The VDC name of the database cluster.",
+ },
+ "is_cluster": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The cluster status of the database cluster.",
+ },
+ "master_count": schema.Int64Attribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersInt,
+ Description: "The number of master nodes in the database cluster.",
+ },
+ "worker_count": schema.Int64Attribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersInt,
+ Description: "The number of worker nodes in the database cluster.",
+ },
+ "node_cpu": schema.Int64Attribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersInt,
+ Description: "The number of CPUs in each node of the database cluster.",
+ },
+ "node_core": schema.Int64Attribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersInt,
+ Description: "The number of cores in each node of the database cluster.",
+ },
+ "node_ram": schema.Int64Attribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersInt,
+ Description: "The amount of RAM in each node of the database cluster.",
+ },
+ "data_disk_size": schema.Int64Attribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersInt,
+ Description: "The size of the data disk in each node of the database cluster.",
+ },
+ "cluster_name": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The name of the database cluster.",
+ },
+ "database_name": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The name of the database in the database cluster.",
+ },
+ "vhost_name": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The name of the RabbitMQ database.",
+ },
+ "is_public": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "Whether the database is public or not.",
+ },
+ "admin_password": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The admin password of the database cluster.",
+ },
+ "storage_profile": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The storage profile of the database cluster.",
+ },
+ "edge_id": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The edge Id of the database cluster.",
+ },
+ "edition": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The edition of the database cluster.",
+ },
+ "is_ops": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "Whether the database is OpenStack or VMware",
+ },
+ "flavor": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The flavor of the database cluster.",
+ },
+ "number_of_node": schema.Int64Attribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersInt,
+ Description: "The number of nodes in the database cluster.",
+ },
+ "domain_name": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ Description: "The domain name of the database cluster.",
+ },
+ },
+ }
+}
+
+// ImportState implements terraform import: it seeds the state with the
+// supplied cluster Id, then hydrates the remaining fields from the API.
+func (r *resourceDatabase) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) {
+	tflog.Info(ctx, "Importing cluster Id "+request.ID)
+
+	var state databaseResourceModel
+	state.Id = types.StringValue(request.ID)
+	if err := r.internalRead(ctx, request.ID, &state); err != nil {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error calling API in Import State Method", err.Error()))
+		return
+	}
+
+	response.Diagnostics.Append(response.State.Set(ctx, &state)...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+}
+
+// Configure stores the provider-configured API client on the resource.
+func (r *resourceDatabase) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) {
+	tflog.Info(ctx, "Configuring")
+	if request.ProviderData == nil {
+		return
+	}
+
+	client, ok := request.ProviderData.(*common.Client)
+	if !ok {
+		response.Diagnostics.AddError(
+			"Unexpected Resource Configure Type",
+			// Name the type actually asserted above; the old message said
+			// *internal.ClientV1, which would mislead anyone reporting the bug.
+			fmt.Sprintf("Expected *common.Client, got: %T. Please report this issue to the provider developers.", request.ProviderData),
+		)
+
+		return
+	}
+
+	r.client = client
+}
+
+// internalRead polls the database detail endpoint until nodes are
+// provisioned (or the cluster fails / the timeout elapses), then copies the
+// cluster detail into the terraform state.
+func (r *resourceDatabase) internalRead(ctx context.Context, databaseId string, state *databaseResourceModel) error {
+	vpcId := state.VpcId.ValueString()
+	tflog.Info(ctx, "Reading state of Database Id "+databaseId+", VPC Id "+vpcId)
+
+	var nodeTotal = 0
+	var timeStart = time.Now()
+	var status = "undefined"
+	var node databaseNode
+	var cluster databaseData
+
+	for nodeTotal == 0 && time.Since(timeStart) < timeout && status != "failed" {
+		// Get database detail from API by database Id
+		a, err := r.client.SendGetRequest(common.ApiPath.DatabaseGet(databaseId))
+		if err != nil {
+			return err
+		}
+
+		// Convert response to Go struct; check the unmarshal error BEFORE
+		// reading any decoded field (the original read the value first).
+		var d databaseReadResponse
+		if err = json.Unmarshal(a, &d); err != nil {
+			return err
+		}
+		cluster = d.Data.Cluster
+		node = d.Data.Node
+		status = cluster.Status
+
+		// Update node_total
+		nodeTotal = int(node.Total)
+		if node.Total == 0 {
+			tflog.Info(ctx, "Waiting for nodes to be provisioned. Time waited: "+strconv.Itoa(int(time.Since(timeStart).Seconds()))+" seconds.")
+			time.Sleep(30 * time.Second)
+		}
+	}
+
+	if status == "failed" {
+		return fmt.Errorf("Failed to provision nodes for database! Server error")
+	}
+	if nodeTotal == 0 {
+		return fmt.Errorf("Request time out! Can not provision nodes for database")
+	}
+	if len(node.Items) == 0 {
+		// Guard the node.Items[0] access below against a response where
+		// total > 0 but the item list is empty.
+		return fmt.Errorf("API reported %d nodes but returned an empty node list", nodeTotal)
+	}
+
+	// Update resource status to state
+	state.VpcId = types.StringValue(cluster.VpcId)
+	state.NetworkId = types.StringValue(cluster.NetworkId)
+	state.VmNetwork = types.StringValue(cluster.VmNetwork)
+	state.Id = types.StringValue(cluster.ClusterId)
+	state.TypeConfig = types.StringValue(cluster.TypeConfig)
+	state.TypeDb = types.StringValue(cluster.TypeDb)
+	state.Version = types.StringValue(cluster.Version)
+	state.IsCluster = types.StringValue(cluster.IsCluster)
+	state.MasterCount = types.Int64Value(int64(cluster.MasterCount))
+	state.WorkerCount = types.Int64Value(int64(cluster.WorkerCount))
+	state.NodeCore = types.Int64Value(int64(cluster.NodeCore))
+	state.NodeCpu = types.Int64Value(int64(cluster.NodeCpu))
+	state.NodeRam = types.Int64Value(int64(cluster.NodeRam))
+	state.DataDiskSize = types.Int64Value(int64(cluster.DataDiskSize))
+	state.ClusterName = types.StringValue(cluster.ClusterName)
+	state.DatabaseName = types.StringValue(cluster.DatabaseName)
+	//state.VhostName = types.StringValue(cluster.VhostName)
+	//state.IsPublic = types.StringValue(cluster.IsPublic)
+	state.AdminPassword = types.StringValue(cluster.AdminPassword)
+	state.StorageProfile = types.StringValue(cluster.StorageProfile)
+	state.EdgeId = types.StringValue(cluster.EdgeId)
+	state.Edition = types.StringValue(cluster.EngineEdition)
+	//state.IsOps = types.StringValue(cluster.IsOps)
+	//state.Flavor = types.StringValue(cluster.Flavor)
+	state.NumberOfNode = types.Int64Value(int64(cluster.MasterCount) + int64(cluster.WorkerCount))
+	state.DomainName = types.StringValue("")
+	state.VdcName = types.StringValue(node.Items[0].VdcName)
+
+	return nil
+}
+
+// remap copies every field of the terraform state model into the JSON
+// payload sent to the API. It is a one-way, field-for-field mapping; no
+// validation or transformation happens here.
+func (r *resourceDatabase) remap(from *databaseResourceModel, to *databaseJson) {
+	to.VpcId = from.VpcId.ValueString()
+	to.NetworkId = from.NetworkId.ValueString()
+	to.VmNetwork = from.VmNetwork.ValueString()
+	to.TypeConfig = from.TypeConfig.ValueString()
+	to.TypeDb = from.TypeDb.ValueString()
+	to.Version = from.Version.ValueString()
+	to.VdcName = from.VdcName.ValueString()
+	to.IsCluster = from.IsCluster.ValueString()
+	to.MasterCount = int(from.MasterCount.ValueInt64())
+	to.WorkerCount = int(from.WorkerCount.ValueInt64())
+	to.NodeCore = int(from.NodeCore.ValueInt64())
+
+	to.NodeCpu = int(from.NodeCpu.ValueInt64())
+	to.NodeRam = int(from.NodeRam.ValueInt64())
+
+	to.DataDiskSize = int(from.DataDiskSize.ValueInt64())
+	to.ClusterName = from.ClusterName.ValueString()
+	to.DatabaseName = from.DatabaseName.ValueString()
+	to.VhostName = from.VhostName.ValueString()
+	to.IsPublic = from.IsPublic.ValueString()
+	to.AdminPassword = from.AdminPassword.ValueString()
+	to.StorageProfile = from.StorageProfile.ValueString()
+	to.EdgeId = from.EdgeId.ValueString()
+	to.Edition = from.Edition.ValueString()
+
+	to.IsOps = from.IsOps.ValueString()
+	to.Flavor = from.Flavor.ValueString()
+
+	to.NumberOfNode = int(from.NumberOfNode.ValueInt64())
+	to.DomainName = from.DomainName.ValueString()
+}
+
+// checkForError inspects a raw API response body and returns an error
+// diagnostic when the body is not valid JSON or carries an "error" field;
+// it returns nil for a clean response.
+func (r *resourceDatabase) checkForError(a []byte) *diag2.ErrorDiagnostic {
+	var parsed map[string]interface{}
+	if err := json.Unmarshal(a, &parsed); err != nil {
+		d := diag2.NewErrorDiagnostic("Error unmarshalling response", err.Error())
+		return &d
+	}
+
+	if _, hasError := parsed["error"]; hasError {
+		d := diag2.NewErrorDiagnostic("Response contained an error field", "Response body was "+string(a))
+		return &d
+	}
+
+	return nil
+}
+
+// databaseJson is the JSON payload placed into requests sent to the
+// database provisioning API.
+type databaseJson struct {
+	Id             string `json:"id,omitempty"`
+	VpcId          string `json:"vpc_id"`
+	NetworkId      string `json:"network_id"`
+	VmNetwork      string `json:"vm_network"`
+	TypeConfig     string `json:"type_config"`
+	TypeDb         string `json:"type_db"`
+	Version        string `json:"version"`
+	VdcName        string `json:"vdc_name"`
+	IsCluster      string `json:"is_cluster"`
+	MasterCount    int    `json:"master_count"`
+	WorkerCount    int    `json:"worker_count"`
+	NodeCpu        int    `json:"node_cpu"`
+	NodeCore       int    `json:"node_core"`
+	NodeRam        int    `json:"node_ram"`
+	DataDiskSize   int    `json:"data_disk_size"`
+	ClusterName    string `json:"cluster_name"`
+	DatabaseName   string `json:"database_name"`
+	VhostName      string `json:"vhost_name"`
+	IsPublic       string `json:"is_public"`
+	AdminPassword  string `json:"admin_password"`
+	StorageProfile string `json:"storage_profile"`
+	EdgeId         string `json:"edge_id"`
+	Edition        string `json:"edition"`
+	IsOps          string `json:"is_ops"`
+	Flavor         string `json:"flavor"`
+	NumberOfNode   int    `json:"number_of_node"`
+	DomainName     string `json:"domain_name"`
+}
+
+// databaseData is the cluster section of the response returned when
+// requesting a database's detail. Several flag-like fields (IsCluster,
+// IsPublic, IsOps) are strings, mirroring the API's wire format.
+type databaseData struct {
+	VpcId           string `json:"vpc_id"`
+	OrgName         string `json:"org_name"`
+	VcdUrl          string `json:"vcd_url"`
+	NetworkId       string `json:"network_id"`
+	VmNetwork       string `json:"vm_network"`
+	StorageProfile  string `json:"storage_profile"`
+	EdgeId          string `json:"edge_id"`
+	Flavor          string `json:"flavor"`
+	ClusterId       string `json:"cluster_id"`
+	ClusterName     string `json:"cluster_name"`
+	Version         string `json:"version"`
+	TypeConfig      string `json:"type_config"`
+	TypeDb          string `json:"type_db"`
+	EngineDb        string `json:"engine_db"`
+	PortDb          string `json:"port_db"`
+	EndPoint        string `json:"end_point"`
+	MasterCount     int    `json:"master_count"`
+	WorkerCount     int    `json:"worker_count"`
+	IsCluster       string `json:"is_cluster"`
+	IsMonitor       bool   `json:"is_monitor"`
+	IsBackup        bool   `json:"is_backup"`
+	NodeCpu         int    `json:"node_cpu"`
+	NodeCore        int    `json:"node_core"`
+	NodeRam         int    `json:"node_ram"`
+	DataDiskSize    int    `json:"data_disk_size"`
+	IpPublic        string `json:"ip_public"`
+	// Status is polled elsewhere; observed values include "running",
+	// "stopped" and "failed".
+	Status          string `json:"status"`
+	DatabaseName    string `json:"database_name"`
+	VhostName       string `json:"vhost_name"`
+	IsPublic        string `json:"is_public"`
+	IsOps           string `json:"is_ops"`
+	AdminPassword   string `json:"admin_password"`
+	SourceClusterId string `json:"source_cluster_id"`
+	EngineEdition   string `json:"engine_edition"`
+	IsNewVersion    bool   `json:"is_new_version"`
+	CreatedAt       string `json:"created_at"`
+	IsAlert         bool   `json:"is_alert"`
+	IsAutoscaling   bool   `json:"is_autoscaling"`
+}
+
+// databaseNode is the node section of the database detail response:
+// a count plus per-node entries.
+type databaseNode struct {
+	Total int64              `json:"total"`
+	Items []databaseNodeItem `json:"items"`
+}
+
+// databaseNodeItem carries the only per-node field this provider consumes.
+type databaseNodeItem struct {
+	VdcName string `json:"vdc_name"`
+}
+
+// databaseReadResponse is the envelope returned by the database detail API.
+type databaseReadResponse struct {
+	Code    string `json:"code"`
+	Message string `json:"message"`
+	// Data carries the cluster detail and its node list. The explicit tag
+	// makes the mapping to the "data" key deliberate instead of relying on
+	// encoding/json's case-insensitive field matching.
+	Data struct {
+		Cluster databaseData `json:"cluster"`
+		Node    databaseNode `json:"nodes"`
+	} `json:"data"`
+}
+
+// databaseCreateResponse is the envelope returned by the API when
+// creating a database.
+type databaseCreateResponse struct {
+	Message string                     `json:"message"`
+	Type    string                     `json:"type"`
+	Data    databaseCreateResponseData `json:"data"`
+}
+
+// databaseCreateResponseData is the payload of databaseCreateResponse.
+type databaseCreateResponseData struct {
+	ClusterId      string `json:"cluster_id"`
+	VpcId          string `json:"vpc_id"`
+	NetworkId      string `json:"network_id"`
+	VmNetwork      string `json:"vm_network"`
+	TypeConfig     string `json:"type_config"`
+	TypeDb         string `json:"type_db"`
+	PortDb         string `json:"port_db"`
+	Version        string `json:"version"`
+	MasterCount    int    `json:"master_count"`
+	WorkerCount    int    `json:"worker_count"`
+	IsCluster      string `json:"is_cluster"`
+	ClusterName    string `json:"cluster_name"`
+	NodeCpu        int    `json:"node_cpu"`
+	NodeCore       int    `json:"node_core"`
+	NodeRam        int    `json:"node_ram"`
+	DataDiskSize   int    `json:"data_disk_size"`
+	VdcName        string `json:"vdc_name"`
+	StorageProfile string `json:"storage_profile"`
+	IsOps          string `json:"is_ops"`
+	Flavor         string `json:"flavor"`
+	NodeCount      int    `json:"node_count"`
+	Status         string `json:"status"`
+	Zone           string `json:"zone"`
+	CreatedAt      string `json:"created_at"`
+	UpdatedAt      string `json:"updated_at"`
+}
diff --git a/fptcloud/database/resource_database_status.go b/fptcloud/database/resource_database_status.go
new file mode 100644
index 0000000..cbc2195
--- /dev/null
+++ b/fptcloud/database/resource_database_status.go
@@ -0,0 +1,366 @@
+package fptcloud_database
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ diag2 "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "strconv"
+ common "terraform-provider-fptcloud/commons"
+
+ "time"
+)
+
+// Compile-time checks that resourceDatabaseStatus implements the
+// plugin-framework interfaces it is registered for.
+var (
+	_ resource.Resource                = &resourceDatabaseStatus{}
+	_ resource.ResourceWithConfigure   = &resourceDatabaseStatus{}
+	_ resource.ResourceWithImportState = &resourceDatabaseStatus{}
+)
+
+// resourceDatabaseStatus manages the running/stopped state of an
+// already-provisioned database cluster.
+type resourceDatabaseStatus struct {
+	client *common.Client
+}
+
+// NewResourceDatabaseStatus returns a new, unconfigured status resource.
+func NewResourceDatabaseStatus() resource.Resource {
+	return &resourceDatabaseStatus{}
+}
+
+// Metadata sets the resource type name (<provider>_database_status).
+func (r *resourceDatabaseStatus) Metadata(ctx context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) {
+	response.TypeName = request.ProviderTypeName + "_database_status"
+}
+
+// Create makes sure the database is in the state requested by the plan,
+// starting or stopping the cluster on the server as needed.
+func (r *resourceDatabaseStatus) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) {
+	// Get planned state of the resource (from terraform).
+	// Note: the original had "¤tState" here — HTML-entity corruption
+	// of "&currentState" — which does not compile.
+	var currentState databaseStatusResourceModel
+	diags := request.Plan.Get(ctx, &currentState)
+
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+
+	// Convert currentState to JSON
+	var database databaseStatusJson
+	r.remap(&currentState, &database)
+
+	// Getting current status of database on the server
+	status, err := r.getDatabaseCurrentStatus(ctx, database.Id)
+	if err != nil {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Can't find matching database", err.Error()))
+		return
+	}
+	if status == "failed" {
+		// err is nil on this path; the original dereferenced it and panicked.
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Database failed", "database "+database.Id+" is in failed state"))
+		return
+	}
+
+	// If the database is running and the customer asked for it stopped
+	if status == "running" && database.Status == "stopped" {
+		err = r.stopDatabase(ctx, database.Id)
+		if err != nil {
+			response.Diagnostics.Append(diag2.NewErrorDiagnostic("Can't stop database", err.Error()))
+			return
+		}
+	} else if status == "stopped" && database.Status == "running" {
+		err = r.startDatabase(ctx, database.Id)
+		if err != nil {
+			response.Diagnostics.Append(diag2.NewErrorDiagnostic("Can't start database", err.Error()))
+			return
+		}
+	}
+
+	// Update new state of resource to terraform state
+	currentState.Id = types.StringValue(database.Id)
+	currentState.Status = types.StringValue(database.Status)
+
+	diags = response.State.Set(ctx, &currentState)
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+}
+
+// Read refreshes the status resource by looking up the database's current
+// status on the server and storing it into terraform state.
+func (r *resourceDatabaseStatus) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) {
+	var state databaseStatusResourceModel
+	diags := request.State.Get(ctx, &state)
+
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+
+	// Get current status of database
+	status, err := r.getDatabaseCurrentStatus(ctx, state.Id.ValueString())
+	if err != nil {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Can't find matching database", err.Error()))
+		return
+	}
+	if status == "failed" {
+		// err is nil on this path; the original called err.Error() here and
+		// would panic with a nil pointer dereference.
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Database failed", "database "+state.Id.ValueString()+" is in failed state"))
+		return
+	}
+
+	// state.Id is already set; only the status needs refreshing.
+	state.Status = types.StringValue(status)
+
+	diags = response.State.Set(ctx, &state)
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+}
+
+// Update is never reached in practice: "status" is the only mutable
+// attribute and it carries a ForceNew plan modifier, so changes recreate
+// the resource. Report an explicit diagnostic instead of panicking, which
+// would crash the whole provider process.
+func (r *resourceDatabaseStatus) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) {
+	response.Diagnostics.AddError(
+		"Update not supported",
+		"The database status resource does not support in-place updates; changes force a new resource.",
+	)
+}
+
+// Delete only removes the status resource from terraform state; it performs
+// no API call, so the database cluster itself is left untouched.
+func (r *resourceDatabaseStatus) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) {
+	var state databaseStatusResourceModel
+	diags := request.State.Get(ctx, &state)
+
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+}
+
+// Schema declares the two attributes of the status resource. "status" is
+// ForceNew, so a status change replaces the resource rather than updating it.
+func (r *resourceDatabaseStatus) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) {
+	response.Schema = schema.Schema{
+		Description: "Provides a Fpt database cluster status to temporarily stop or start a database.",
+		Attributes: map[string]schema.Attribute{
+			"id": schema.StringAttribute{
+				Required:    true,
+				Description: "The Id of the database cluster.",
+			},
+			"status": schema.StringAttribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersString,
+				Description:   "The status of the database cluster, must be 'running' or 'stopped'.",
+			},
+		},
+	}
+}
+
+// ImportState seeds the state with the supplied database Id and hydrates the
+// current status from the API.
+func (r *resourceDatabaseStatus) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) {
+	var state databaseStatusResourceModel
+	state.Id = types.StringValue(request.ID)
+
+	if err := r.internalRead(ctx, request.ID, &state); err != nil {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error calling API in Import State Method", err.Error()))
+		return
+	}
+
+	response.Diagnostics.Append(response.State.Set(ctx, &state)...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+}
+
+// Configure stores the provider-configured API client on the resource.
+func (r *resourceDatabaseStatus) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) {
+	if request.ProviderData == nil {
+		return
+	}
+
+	client, ok := request.ProviderData.(*common.Client)
+	if !ok {
+		response.Diagnostics.AddError(
+			"Unexpected Resource Configure Type",
+			// Name the type actually asserted above; the old message said
+			// *internal.ClientV1, which would mislead anyone reporting the bug.
+			fmt.Sprintf("Expected *common.Client, got: %T. Please report this issue to the provider developers.", request.ProviderData),
+		)
+
+		return
+	}
+
+	r.client = client
+}
+
+// getDatabaseCurrentStatus polls the database detail endpoint until the
+// cluster reports a terminal status ("running", "stopped" or "failed") or
+// the timeout elapses, and returns that status.
+func (r *resourceDatabaseStatus) getDatabaseCurrentStatus(ctx context.Context, databaseId string) (string, error) {
+	status := ""
+
+	// fetch retrieves and decodes the database detail once; it checks the
+	// unmarshal error before touching the decoded value (the original read
+	// d.Code first) and collapses the duplicated fetch logic.
+	fetch := func() (databaseData, error) {
+		a, err := r.client.SendGetRequest(common.ApiPath.DatabaseGet(databaseId))
+		if err != nil {
+			return databaseData{}, err
+		}
+		var d databaseReadResponse
+		if err = json.Unmarshal(a, &d); err != nil {
+			return databaseData{}, err
+		}
+		if d.Code != "200" {
+			return databaseData{}, fmt.Errorf("Database not found")
+		}
+		return d.Data.Cluster, nil
+	}
+
+	cluster, err := fetch()
+	if err != nil {
+		return status, err
+	}
+
+	// Wait for database to be provisioned. Sleeping before the re-fetch also
+	// avoids the original's pointless 30s sleep after a terminal status.
+	timeStart := time.Now()
+	for cluster.Status != "running" && cluster.Status != "stopped" && cluster.Status != "failed" && time.Since(timeStart) < timeout {
+		tflog.Info(ctx, "Waiting for database to be provisioned. Time waited: "+time.Since(timeStart).String())
+		time.Sleep(30 * time.Second)
+
+		cluster, err = fetch()
+		if err != nil {
+			return status, err
+		}
+	}
+
+	if cluster.Status == "running" || cluster.Status == "stopped" || cluster.Status == "failed" {
+		status = cluster.Status
+	} else {
+		return "not found", fmt.Errorf("Request time out! Can not provision database")
+	}
+
+	return status, nil
+}
+
+// stopDatabase asks the API to stop the given cluster and then polls its
+// status until the server reports "stopped", or the timeout elapses.
+func (r *resourceDatabaseStatus) stopDatabase(ctx context.Context, databaseId string) error {
+	payload := map[string]string{
+		"cluster_id": databaseId,
+	}
+
+	if _, err := r.client.SendPostRequest(common.ApiPath.DatabaseStop(), payload); err != nil {
+		tflog.Error(ctx, "Error stopping database: "+err.Error())
+		return err
+	}
+
+	began := time.Now()
+	for time.Since(began) < timeout {
+		current, err := r.getDatabaseCurrentStatus(ctx, databaseId)
+		if err != nil {
+			return err
+		}
+		if current == "stopped" {
+			return nil
+		}
+
+		tflog.Info(ctx, "Waiting for nodes to be stopped. Time waited: "+time.Since(began).String())
+		time.Sleep(60 * time.Second)
+	}
+
+	return fmt.Errorf("Request time out! Can not stop database")
+}
+
+// startDatabase asks the API to start the given cluster and then polls its
+// status until the server reports "running", or the timeout elapses.
+func (r *resourceDatabaseStatus) startDatabase(ctx context.Context, databaseId string) error {
+	payload := map[string]string{
+		"cluster_id": databaseId,
+	}
+
+	if _, err := r.client.SendPostRequest(common.ApiPath.DatabaseStart(), payload); err != nil {
+		return err
+	}
+
+	began := time.Now()
+	for time.Since(began) < timeout {
+		current, err := r.getDatabaseCurrentStatus(ctx, databaseId)
+		if err != nil {
+			return err
+		}
+		if current == "running" {
+			return nil
+		}
+
+		tflog.Info(ctx, "Waiting for nodes to be provisioned. Time waited: "+time.Since(began).String())
+		time.Sleep(60 * time.Second)
+	}
+
+	return fmt.Errorf("Request time out! Can not start database")
+}
+
+// internalRead fetches the database detail from the API (polling until nodes
+// exist or the timeout elapses) and updates the terraform state.
+func (r *resourceDatabaseStatus) internalRead(ctx context.Context, databaseId string, state *databaseStatusResourceModel) error {
+	tflog.Info(ctx, "Reading state of Database Id "+databaseId)
+
+	var nodeTotal = 0
+	var timeStart = time.Now()
+	var node databaseNode
+	var cluster databaseData
+
+	for nodeTotal == 0 && time.Since(timeStart) < timeout {
+		// Use the shared ApiPath helper; the original hand-built path was
+		// missing the "/v1/" prefix and duplicated the route string.
+		a, err := r.client.SendGetRequest(common.ApiPath.DatabaseGet(databaseId))
+		if err != nil {
+			return err
+		}
+
+		// Convert response to Go struct; check the unmarshal error BEFORE
+		// reading any decoded field (the original read the value first).
+		var d databaseReadResponse
+		if err = json.Unmarshal(a, &d); err != nil {
+			return err
+		}
+		cluster = d.Data.Cluster
+		node = d.Data.Node
+
+		// Update node_total
+		nodeTotal = int(node.Total)
+		if node.Total == 0 {
+			tflog.Info(ctx, "Waiting for nodes to be provisioned. Time waited: "+strconv.Itoa(int(time.Since(timeStart).Seconds()))+" seconds.")
+			time.Sleep(30 * time.Second)
+		}
+	}
+
+	if nodeTotal == 0 {
+		return fmt.Errorf("Request time out! Can not provision nodes for database")
+	}
+
+	// Update resource status to state. The Id must be the cluster id; the
+	// original stored cluster.VpcId here, clobbering the imported id.
+	state.Id = types.StringValue(cluster.ClusterId)
+	state.Status = types.StringValue(cluster.Status)
+
+	return nil
+}
+
+// remap copies the terraform state model into the JSON payload sent to the
+// API; a plain one-way, field-for-field mapping.
+func (r *resourceDatabaseStatus) remap(from *databaseStatusResourceModel, to *databaseStatusJson) {
+	to.Id = from.Id.ValueString()
+	to.Status = from.Status.ValueString()
+}
+
+// databaseStatusJson is the database status payload sent to the API.
+type databaseStatusJson struct {
+	Id     string `json:"id"`
+	Status string `json:"status"`
+}
+
+// databaseStatusResourceModel is the database status as managed in
+// terraform state.
+type databaseStatusResourceModel struct {
+	Id     types.String `tfsdk:"id" json:"id"`
+	Status types.String `tfsdk:"status" json:"status"`
+}
diff --git a/fptcloud/dfke/datasource_dfke.go b/fptcloud/dfke/datasource_dfke.go
new file mode 100644
index 0000000..0e3937c
--- /dev/null
+++ b/fptcloud/dfke/datasource_dfke.go
@@ -0,0 +1,315 @@
+package fptcloud_dfke
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ diag2 "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "terraform-provider-fptcloud/commons"
+)
+
+// Compile-time checks that the data source implements the plugin-framework
+// interfaces it is registered for.
+var (
+	_ datasource.DataSource              = &datasourceDedicatedKubernetesEngine{}
+	_ datasource.DataSourceWithConfigure = &datasourceDedicatedKubernetesEngine{}
+)
+
+const (
+	// noSuchClusterId is the error text returned when a cluster lookup
+	// by its short dashboard id finds nothing.
+	noSuchClusterId = "no cluster with such ID found"
+)
+
+// datasourceDedicatedKubernetesEngine reads dedicated FKE cluster details.
+type datasourceDedicatedKubernetesEngine struct {
+	client           *commons.Client
+	dfkeClient       *dfkeApiClient
+	tenancyApiClient *TenancyApiClient
+}
+
+// NewDataSourceDedicatedKubernetesEngine returns a new, unconfigured
+// dedicated FKE data source.
+func NewDataSourceDedicatedKubernetesEngine() datasource.DataSource {
+	return &datasourceDedicatedKubernetesEngine{}
+}
+
+// Configure stores the provider-configured API client on the data source and
+// builds the derived dfke and tenancy clients from it.
+func (d *datasourceDedicatedKubernetesEngine) Configure(ctx context.Context, request datasource.ConfigureRequest, response *datasource.ConfigureResponse) {
+	if request.ProviderData == nil {
+		return
+	}
+
+	client, ok := request.ProviderData.(*commons.Client)
+	if !ok {
+		response.Diagnostics.AddError(
+			"Unexpected Resource Configure Type",
+			fmt.Sprintf("Expected *commons.Client, got: %T. Please report this issue to the provider developers.", request.ProviderData),
+		)
+
+		return
+	}
+
+	d.client = client
+	d.dfkeClient = newDfkeApiClient(client)
+
+	t := NewTenancyApiClient(client)
+	d.tenancyApiClient = t
+}
+
+// Metadata sets the data source type name
+// (<provider>_dedicated_kubernetes_engine_v1).
+func (d *datasourceDedicatedKubernetesEngine) Metadata(ctx context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) {
+	response.TypeName = request.ProviderTypeName + "_dedicated_kubernetes_engine_v1"
+}
+
+// Schema declares the attribute set of the dedicated FKE data source.
+// Only vpc_id and cluster_id are required inputs; every other attribute is
+// computed from the API response.
+func (d *datasourceDedicatedKubernetesEngine) Schema(ctx context.Context, request datasource.SchemaRequest, response *datasource.SchemaResponse) {
+	response.Schema = schema.Schema{
+		Description: "Retrieves information about dedicated FKE clusters",
+		Attributes: map[string]schema.Attribute{
+			"id": schema.StringAttribute{
+				Computed:    true,
+				Description: "UUID of the cluster",
+			},
+			"vpc_id": schema.StringAttribute{
+				Required:    true,
+				Description: "VPC ID",
+			},
+			"cluster_id": schema.StringAttribute{
+				Required:    true,
+				Description: "Cluster ID, as shown on the dashboard, usually has a length of 8 characters",
+			},
+			"cluster_name": schema.StringAttribute{
+				Computed:    true,
+				Description: "Cluster name",
+			},
+			"k8s_version": schema.StringAttribute{
+				Computed:    true,
+				Description: "Kubernetes version",
+			},
+			"master_type": schema.StringAttribute{
+				Computed:    true,
+				Description: "ID of the flavor of master node",
+			},
+			"master_count": schema.Int64Attribute{
+				Computed:    true,
+				Description: "Number of master node",
+			},
+			"master_disk_size": schema.Int64Attribute{
+				Computed:    true,
+				Description: "Master node disk capacity in GB",
+			},
+			"worker_type": schema.StringAttribute{
+				Computed:    true,
+				Description: "ID of the flavor of worker node",
+			},
+			"worker_disk_size": schema.Int64Attribute{
+				Computed:    true,
+				Description: "Worker node disk capacity in GB",
+			},
+			"network_id": schema.StringAttribute{
+				Computed:    true,
+				Description: "Network UUID",
+			},
+			"lb_size": schema.StringAttribute{
+				Computed:    true,
+				Description: "Load balancer size",
+			},
+			"pod_network": schema.StringAttribute{
+				Computed:    true,
+				Description: "Pod network in CIDR notation",
+			},
+			"service_network": schema.StringAttribute{
+				Computed:    true,
+				Description: "Service network in CIDR notation",
+			},
+			"network_node_prefix": schema.Int64Attribute{
+				Computed:    true,
+				Description: "Network node prefix",
+			},
+			"max_pod_per_node": schema.Int64Attribute{
+				Computed:    true,
+				Description: "Max pods per node",
+			},
+			"nfs_status": schema.StringAttribute{
+				Computed:    true,
+				Description: "NFS status",
+			},
+			"nfs_disk_size": schema.Int64Attribute{
+				Computed:    true,
+				Description: "NFS disk size",
+			},
+			"storage_policy": schema.StringAttribute{
+				Computed:    true,
+				Description: "Storage policy",
+			},
+			"edge_id": schema.StringAttribute{
+				Computed:    true,
+				Description: "Edge ID",
+			},
+			"scale_min": schema.Int64Attribute{
+				Computed:    true,
+				Description: "Minimum number of nodes for autoscaling",
+			},
+			"scale_max": schema.Int64Attribute{
+				Computed:    true,
+				Description: "Maximum number of nodes for autoscaling",
+			},
+			"node_dns": schema.StringAttribute{
+				Computed:    true,
+				Description: "DNS server of nodes",
+			},
+			"ip_public_firewall": schema.StringAttribute{
+				Computed:    true,
+				Description: "IP public firewall",
+			},
+			"ip_private_firewall": schema.StringAttribute{
+				Computed:    true,
+				Description: "IP private firewall",
+			},
+			"region_id": schema.StringAttribute{
+				Computed:    true,
+				Description: "Region ID",
+			},
+		},
+	}
+}
+
+// Read resolves the short dashboard cluster_id to the backend UUID, then
+// loads the full cluster detail into state.
+func (d *datasourceDedicatedKubernetesEngine) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) {
+	var state dedicatedKubernetesEngine
+	diags := request.Config.Get(ctx, &state)
+
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+
+	clusterId := state.ClusterId.ValueString()
+	// Translate the user-facing cluster id into the internal UUID the
+	// detail endpoint expects.
+	uuid, err := d.findClusterUUID(ctx, state.vpcId(), clusterId)
+	if err != nil {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error resolving cluster UUID", err.Error()))
+		return
+	}
+
+	_, err = d.internalRead(ctx, uuid, &state)
+	if err != nil {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error calling API", err.Error()))
+		return
+	}
+
+	diags = response.State.Set(ctx, &state)
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+}
+
+// internalRead fetches the cluster detail (clusterId is the backend UUID),
+// decodes the nested AWX parameter blob, resolves the edge gateway and
+// region, and maps everything into the terraform state.
+func (d *datasourceDedicatedKubernetesEngine) internalRead(ctx context.Context, clusterId string, state *dedicatedKubernetesEngine) (*dedicatedKubernetesEngineReadResponse, error) {
+	vpcId := state.VpcId.ValueString()
+	tflog.Info(ctx, "Reading state of cluster ID "+clusterId+", VPC ID "+vpcId)
+
+	a, err := d.client.SendGetRequest(commons.ApiPath.DedicatedFKEGet(vpcId, clusterId))
+
+	if err != nil {
+		return nil, err
+	}
+
+	var readResponse dedicatedKubernetesEngineReadResponse
+	err = json.Unmarshal(a, &readResponse)
+	if err != nil {
+		tflog.Info(ctx, "Error unmarshalling cluster info for cluster "+clusterId)
+		return nil, err
+	}
+	data := readResponse.Cluster
+
+	// AwxParams is a JSON string embedded in the response; decode it
+	// separately into the detailed parameter struct.
+	var awx dedicatedKubernetesEngineParams
+	err = json.Unmarshal([]byte(data.AwxParams), &awx)
+
+	if err != nil {
+		tflog.Info(ctx, "Error unmarshalling AWX params for cluster "+clusterId)
+		tflog.Info(ctx, "AwxParams is "+data.AwxParams)
+		return nil, err
+	}
+
+	// resolve edge ID (the API returns the edge gateway URN, not the id)
+	edgeId, err := d.dfkeClient.FindEdgeByEdgeGatewayId(ctx, vpcId, data.EdgeID)
+	if err != nil {
+		return nil, err
+	}
+
+	state.ClusterId = types.StringValue(data.ClusterID)
+	state.ClusterName = types.StringValue(data.Name)
+	state.Version = types.StringValue(awx.K8SVersion)
+	state.MasterType = types.StringValue(awx.MasterType)
+	state.MasterCount = types.Int64Value(int64(awx.MasterCount))
+	state.MasterDiskSize = types.Int64Value(int64(awx.MasterDiskSize))
+	state.WorkerType = types.StringValue(awx.WorkerType)
+	state.WorkerDiskSize = types.Int64Value(int64(awx.WorkerDiskSize))
+	state.NetworkID = types.StringValue(data.NetworkID)
+	state.LbSize = types.StringValue(awx.LbSize)
+	// Pod/service networks are stored split into base + prefix; rejoin
+	// them into CIDR notation for the schema.
+	state.PodNetwork = types.StringValue(awx.PodNetwork + "/" + awx.PodPrefix)
+	state.ServiceNetwork = types.StringValue(awx.ServiceNetwork + "/" + awx.ServicePrefix)
+	state.NetworkNodePrefix = types.Int64Value(int64(awx.NetworkNodePrefix))
+	state.MaxPodPerNode = types.Int64Value(int64(awx.K8SMaxPod))
+	state.NfsStatus = types.StringValue(awx.NfsStatus)
+	state.NfsDiskSize = types.Int64Value(int64(awx.NfsDiskSize))
+	state.StoragePolicy = types.StringValue(awx.StorageProfile)
+	state.EdgeID = types.StringValue(edgeId)
+	state.ScaleMin = types.Int64Value(int64(awx.ScaleMinSize))
+	state.ScaleMax = types.Int64Value(int64(awx.ScaleMaxSize))
+	state.NodeDNS = types.StringValue(awx.NodeDNS)
+	state.IPPublicFirewall = types.StringValue(awx.IPPublicFirewall)
+	state.IPPrivateFirewall = types.StringValue(awx.IPPrivateFirewall)
+	state.VpcId = types.StringValue(data.VpcID)
+	//state.CustomScript = awx.CustomScript
+	//state.EnableCustomScript = awx.EnableCustomScript
+	region, err := getRegionFromVpcId(d.tenancyApiClient, ctx, vpcId)
+	if err != nil {
+		return nil, err
+	}
+	state.RegionId = types.StringValue(region)
+
+	return &readResponse, nil
+}
+
+// findClusterUUID pages through the dedicated FKE cluster list of a VPC and
+// returns the backend UUID of the entry whose short cluster_id matches.
+func (d *datasourceDedicatedKubernetesEngine) findClusterUUID(_ context.Context, vpcId string, clusterId string) (string, error) {
+	total := 1
+	found := 0
+
+	index := 1
+	for found < total {
+		path := commons.ApiPath.DedicatedFKEList(vpcId, index, 25)
+		data, err := d.client.SendGetRequest(path)
+		if err != nil {
+			return "", err
+		}
+
+		var list dedicatedKubernetesEngineList
+		err = json.Unmarshal(data, &list)
+		if err != nil {
+			return "", err
+		}
+
+		if list.Total == 0 || len(list.Data) == 0 {
+			return "", errors.New(noSuchClusterId)
+		}
+
+		total = list.Total
+		// Count the entries scanned so the pagination loop terminates once
+		// every page has been seen; the original never updated found and
+		// relied on the API eventually returning an empty page.
+		found += len(list.Data)
+		index += 1
+		for _, entry := range list.Data {
+			if entry.ClusterId == clusterId {
+				return entry.Id, nil
+			}
+		}
+	}
+
+	return "", errors.New(noSuchClusterId)
+}
+
+// dedicatedKubernetesEngineList is one page of the dedicated FKE cluster
+// list response; Total is the overall cluster count across all pages.
+type dedicatedKubernetesEngineList struct {
+	Data []struct {
+		ClusterName string `json:"cluster_name"`
+		ClusterId   string `json:"cluster_id,omitempty"`
+		Id          string `json:"id,omitempty"`
+	} `json:"data"`
+	Total int `json:"total"`
+}
diff --git a/fptcloud/dfke/dfke_service.go b/fptcloud/dfke/dfke_service.go
new file mode 100644
index 0000000..f402566
--- /dev/null
+++ b/fptcloud/dfke/dfke_service.go
@@ -0,0 +1,52 @@
+package fptcloud_dfke
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "strings"
+ "terraform-provider-fptcloud/commons"
+ fptcloud_subnet "terraform-provider-fptcloud/fptcloud/subnet"
+)
+
+// dfkeApiClient embeds the shared commons HTTP client and adds DFKE-specific
+// lookup helpers (currently edge-gateway resolution).
+type dfkeApiClient struct {
+	*commons.Client
+}
+
+// newDfkeApiClient wraps the given commons client with DFKE-specific helpers.
+func newDfkeApiClient(c *commons.Client) *dfkeApiClient {
+	client := dfkeApiClient{Client: c}
+	return &client
+}
+
+// edgeListResponse is the envelope returned by the edge-gateway list endpoint.
+type edgeListResponse struct {
+	Data []fptcloud_subnet.EdgeGateway `json:"data"`
+}
+
+// FindEdgeByEdgeGatewayId resolves a vCloud edge-gateway URN (must start with
+// "urn:vcloud:gateway") to the platform's internal edge ID by listing the
+// VPC's edge gateways and matching on EdgeGatewayId.
+func (a *dfkeApiClient) FindEdgeByEdgeGatewayId(ctx context.Context, vpcId string, edgeId string) (string, error) {
+	if !strings.HasPrefix(edgeId, "urn:vcloud:gateway") {
+		return "", errors.New("edge gateway id must be prefixed with \"urn:vcloud:gateway\"")
+	}
+
+	tflog.Info(ctx, "Resolving edge by gateway ID "+edgeId)
+
+	// Consistency fix: use the shared ApiPath helper instead of duplicating
+	// the URL format string here.
+	path := commons.ApiPath.EdgeGatewayList(vpcId)
+	r, err := a.Client.SendGetRequest(path)
+	if err != nil {
+		return "", err
+	}
+
+	var edgeList edgeListResponse
+	err = json.Unmarshal(r, &edgeList)
+	if err != nil {
+		return "", err
+	}
+
+	for _, edge := range edgeList.Data {
+		if edge.EdgeGatewayId == edgeId {
+			return edge.ID, nil
+		}
+	}
+
+	// Include the searched IDs so a failed lookup is actionable.
+	return "", fmt.Errorf("no edge gateway with gateway id %q found in VPC %s", edgeId, vpcId)
+}
diff --git a/fptcloud/dfke/resource_dfke.go b/fptcloud/dfke/resource_dfke.go
new file mode 100644
index 0000000..ae61de2
--- /dev/null
+++ b/fptcloud/dfke/resource_dfke.go
@@ -0,0 +1,992 @@
+package fptcloud_dfke
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ diag2 "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "strings"
+ "terraform-provider-fptcloud/commons"
+ fptcloud_vpc "terraform-provider-fptcloud/fptcloud/vpc"
+ "time"
+)
+
+var (
+	// Compile-time checks that the resource implements the plugin-framework
+	// interfaces it is registered for.
+	_ resource.Resource              = &resourceDedicatedKubernetesEngine{}
+	_ resource.ResourceWithConfigure = &resourceDedicatedKubernetesEngine{}
+	_ resource.ResourceWithImportState = &resourceDedicatedKubernetesEngine{}
+
+	// Plan modifiers shared by all attributes that cannot be changed in
+	// place and therefore force cluster replacement.
+	forceNewPlanModifiersString = []planmodifier.String{
+		stringplanmodifier.RequiresReplace(),
+	}
+
+	forceNewPlanModifiersInt = []planmodifier.Int64{
+		int64planmodifier.RequiresReplace(),
+	}
+)
+
+const (
+	// errorCallingApi is the shared diagnostic summary for failed API calls.
+	errorCallingApi = "Error calling API"
+	// responseBodyPrefix prefixes raw response bodies echoed into diagnostics.
+	responseBodyPrefix = "Response body was "
+)
+
+// resourceDedicatedKubernetesEngine manages dedicated FKE clusters. It holds
+// the raw API client plus two wrappers: dfkeClient for edge-gateway lookups
+// and tenancyApiClient for tenant/region resolution.
+type resourceDedicatedKubernetesEngine struct {
+	client           *commons.Client
+	dfkeClient       *dfkeApiClient
+	tenancyApiClient *TenancyApiClient
+}
+
+// Create provisions a new dedicated FKE cluster from the plan, waits up to 30
+// minutes for it to reach SUCCEEDED, then re-reads the final server-side
+// state before persisting it.
+func (r *resourceDedicatedKubernetesEngine) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) {
+	var state dedicatedKubernetesEngine
+	diags := request.Plan.Get(ctx, &state)
+
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+
+	var f dedicatedKubernetesEngineJson
+	r.remap(&state, &f)
+
+	// These wire-format fields are not exposed through the Terraform schema
+	// yet; send explicit zero values.
+	f.CustomScript = ""
+	f.EnableCustomScript = false
+	f.PublicKey = ""
+	f.UpstreamDNS = ""
+
+	client := r.client
+	a, err := client.SendPostRequest(fmt.Sprintf("/v1/xplat/fke/vpc/%s/kubernetes", state.vpcId()), f)
+
+	if err != nil {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error calling API", err.Error()))
+		return
+	}
+
+	// The API can return HTTP 200 with an error payload; inspect the body.
+	errorResponse := checkForError(a)
+	if errorResponse != nil {
+		response.Diagnostics.Append(errorResponse)
+		return
+	}
+
+	var createResponse dedicatedKubernetesEngineCreateResponse
+	if err = json.Unmarshal(a, &createResponse); err != nil {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error unmarshalling response", err.Error()))
+		return
+	}
+
+	tflog.Info(ctx, "create response was "+string(a))
+
+	tflog.Info(ctx, "Created cluster with id "+createResponse.Cluster.ID)
+
+	// The UUID must be in state before polling: waitForSucceeded and
+	// internalRead both derive their request URLs from it.
+	state.Id = types.StringValue(createResponse.Cluster.ID)
+
+	// ignoreError=true: transient read failures right after provisioning are
+	// tolerated while polling.
+	if err = r.waitForSucceeded(ctx, &state, 30*time.Minute, true); err != nil {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error waiting cluster up", err.Error()))
+		return
+	}
+	if _, err = r.internalRead(ctx, createResponse.Cluster.ID, &state); err != nil {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error reading cluster state", err.Error()))
+		return
+	}
+
+	diags = response.State.Set(ctx, &state)
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+}
+
+// Read refreshes the resource from the API using the cluster UUID in state.
+func (r *resourceDedicatedKubernetesEngine) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) {
+	var state dedicatedKubernetesEngine
+	response.Diagnostics.Append(request.State.Get(ctx, &state)...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+
+	if _, err := r.internalRead(ctx, state.Id.ValueString(), &state); err != nil {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic(errorCallingApi, err.Error()))
+		return
+	}
+
+	response.Diagnostics.Append(response.State.Set(ctx, &state)...)
+}
+
+// Update applies in-place changes (disk extension, flavor change, autoscale
+// bounds, Kubernetes version) by diffing prior state against the plan, then
+// refreshes state from the API.
+func (r *resourceDedicatedKubernetesEngine) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) {
+	var state dedicatedKubernetesEngine
+	diags := request.State.Get(ctx, &state)
+
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+
+	var plan dedicatedKubernetesEngine
+	// Bug fix: the diagnostics from reading the plan were previously
+	// discarded (the stale state diags were appended a second time instead).
+	diags = request.Plan.Get(ctx, &plan)
+
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+
+	errDiag := r.diff(ctx, &state, &plan)
+	if errDiag != nil {
+		response.Diagnostics.Append(errDiag)
+		return
+	}
+
+	// Re-read so computed values reflect whatever the API actually applied.
+	_, err := r.internalRead(ctx, state.Id.ValueString(), &state)
+	if err != nil {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error refreshing state", err.Error()))
+		return
+	}
+
+	diags = response.State.Set(ctx, &state)
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+}
+
+// Delete tears down the cluster identified by the state's UUID.
+func (r *resourceDedicatedKubernetesEngine) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) {
+	var state dedicatedKubernetesEngine
+	diags := request.State.Get(ctx, &state)
+
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+
+	// Bug fix: state.Id is a types.String; formatting it directly with %s
+	// renders the quoted form ("<uuid>") into the URL. Unwrap it via
+	// clusterUUID() (ValueString) instead.
+	_, err := r.client.SendDeleteRequest(fmt.Sprintf("/v1/xplat/fke/vpc/%s/cluster/%s/delete", state.vpcId(), state.clusterUUID()))
+	if err != nil {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic(errorCallingApi, err.Error()))
+		return
+	}
+}
+
+// ImportState imports an existing cluster. The import ID must be in the form
+// "vpcId/clusterId" because reading a cluster requires both identifiers.
+func (r *resourceDedicatedKubernetesEngine) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) {
+	tflog.Info(ctx, "Importing DFKE cluster ID "+request.ID)
+
+	var state dedicatedKubernetesEngine
+
+	// format: vpcId/clusterId
+	id := request.ID
+	pieces := strings.Split(id, "/")
+	if len(pieces) != 2 {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Invalid format", "must be in format vpcId/clusterId"))
+		return
+	}
+
+	vpcId := pieces[0]
+	clusterId := pieces[1]
+
+	// internalRead derives the request URL from state.VpcId, so it must be
+	// populated before the read.
+	state.VpcId = types.StringValue(vpcId)
+
+	state.Id = types.StringValue(clusterId)
+	_, err := r.internalRead(ctx, clusterId, &state)
+	if err != nil {
+		response.Diagnostics.Append(diag2.NewErrorDiagnostic(errorCallingApi, err.Error()))
+		return
+	}
+
+	diags := response.State.Set(ctx, &state)
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+}
+
+func NewResourceDedicatedKubernetesEngine() resource.Resource {
+ return &resourceDedicatedKubernetesEngine{}
+}
+
+// Metadata sets the resource type name exposed to Terraform configurations.
+func (r *resourceDedicatedKubernetesEngine) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) {
+	name := request.ProviderTypeName + "_dedicated_kubernetes_engine_v1"
+	response.TypeName = name
+}
+
+// Schema defines all user-facing attributes of the dedicated FKE cluster
+// resource. Attributes carrying a RequiresReplace plan modifier cannot be
+// changed in place and force cluster re-creation.
+func (r *resourceDedicatedKubernetesEngine) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) {
+	response.Schema = schema.Schema{
+		Description: "Manage dedicated FKE clusters.",
+		Attributes: map[string]schema.Attribute{
+			"id": schema.StringAttribute{
+				Computed:    true,
+				Description: "Cluster UUID",
+			},
+			"cluster_id": schema.StringAttribute{
+				Computed:    true,
+				Description: "Cluster slug",
+			},
+			"cluster_name": schema.StringAttribute{
+				Required:    true,
+				Description: "Cluster name",
+			},
+			"k8s_version": schema.StringAttribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersString,
+				Description:   "Kubernetes version",
+			},
+			"master_type": schema.StringAttribute{
+				Required:    true,
+				Description: "Flavor ID of master node",
+			},
+			"master_count": schema.Int64Attribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersInt,
+				Description:   "Number of master nodes",
+			},
+			"master_disk_size": schema.Int64Attribute{
+				Required:    true,
+				Description: "Disk size of master node in GB",
+			},
+			"worker_type": schema.StringAttribute{
+				Required:    true,
+				Description: "Flavor ID of worker node",
+			},
+			"worker_disk_size": schema.Int64Attribute{
+				Required:    true,
+				Description: "Disk size of worker node in GB",
+			},
+			"network_id": schema.StringAttribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersString,
+				// Bug fix: this description was a copy-paste of the worker
+				// disk size text.
+				Description: "Network ID of the cluster network",
+			},
+			"lb_size": schema.StringAttribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersString,
+				Description:   "Load balancer size",
+			},
+			"pod_network": schema.StringAttribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersString,
+				Description:   "Pod network in CIDR notation",
+			},
+			"service_network": schema.StringAttribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersString,
+				Description:   "Service network in CIDR notation",
+			},
+			"network_node_prefix": schema.Int64Attribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersInt,
+				Description:   "Network node prefix",
+			},
+			"max_pod_per_node": schema.Int64Attribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersInt,
+				Description:   "Max pods per node",
+			},
+			"nfs_status": schema.StringAttribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersString,
+				Description:   "NFS status",
+			},
+			"nfs_disk_size": schema.Int64Attribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersInt,
+				Description:   "NFS disk size",
+			},
+			"storage_policy": schema.StringAttribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersString,
+				Description:   "Storage policy",
+			},
+			"edge_id": schema.StringAttribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersString,
+				Description:   "Edge ID",
+			},
+			"scale_min": schema.Int64Attribute{
+				Required:    true,
+				Description: "Minimum number of nodes for autoscaling",
+			},
+			"scale_max": schema.Int64Attribute{
+				Required:    true,
+				Description: "Maximum number of nodes for autoscaling",
+			},
+			"node_dns": schema.StringAttribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersString,
+				Description:   "DNS server of nodes",
+			},
+			"ip_public_firewall": schema.StringAttribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersString,
+				Description:   "IP public firewall",
+			},
+			"ip_private_firewall": schema.StringAttribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersString,
+				Description:   "IP private firewall",
+			},
+			"vpc_id": schema.StringAttribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersString,
+				Description:   "VPC ID",
+			},
+			"region_id": schema.StringAttribute{
+				Required:      true,
+				PlanModifiers: forceNewPlanModifiersString,
+				Description:   "Region ID",
+			},
+		},
+	}
+}
+
+// Configure receives the provider-level *commons.Client and builds the two
+// derived clients (edge lookup, tenancy). ProviderData is nil during early
+// framework validation passes, hence the guard.
+func (r *resourceDedicatedKubernetesEngine) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) {
+	if request.ProviderData == nil {
+		return
+	}
+
+	client, ok := request.ProviderData.(*commons.Client)
+	if !ok {
+		response.Diagnostics.AddError(
+			"Unexpected Resource Configure Type",
+			fmt.Sprintf("Expected *commons.Client, got: %T. Please report this issue to the provider developers.", request.ProviderData),
+		)
+
+		return
+	}
+
+	r.client = client
+	r.dfkeClient = newDfkeApiClient(client)
+
+	t := NewTenancyApiClient(client)
+	r.tenancyApiClient = t
+}
+
+// internalRead fetches the cluster identified by clusterId (using the VPC ID
+// already present in state) and maps the response onto state in place. It
+// returns the raw read response so callers can inspect fields (e.g. status)
+// that are not part of the Terraform model.
+func (r *resourceDedicatedKubernetesEngine) internalRead(ctx context.Context, clusterId string, state *dedicatedKubernetesEngine) (*dedicatedKubernetesEngineReadResponse, error) {
+	vpcId := state.VpcId.ValueString()
+	tflog.Info(ctx, "Reading state of cluster ID "+clusterId+", VPC ID "+vpcId)
+
+	a, err := r.client.SendGetRequest(commons.ApiPath.DedicatedFKEGet(vpcId, clusterId))
+
+	if err != nil {
+		return nil, err
+	}
+
+	var d dedicatedKubernetesEngineReadResponse
+	err = json.Unmarshal(a, &d)
+	if err != nil {
+		return nil, err
+	}
+	data := d.Cluster
+
+	// AwxParams is a JSON document embedded as a string inside the cluster
+	// payload; most of the interesting settings live there.
+	var awx dedicatedKubernetesEngineParams
+	err = json.Unmarshal([]byte(d.Cluster.AwxParams), &awx)
+
+	if err != nil {
+		return nil, err
+	}
+
+	// resolve edge ID
+	// The API reports the vCloud gateway URN; the schema stores the
+	// platform's internal edge ID, so translate it back.
+	edgeId, err := r.dfkeClient.FindEdgeByEdgeGatewayId(ctx, vpcId, data.EdgeID)
+	if err != nil {
+		return nil, err
+	}
+
+	state.ClusterId = types.StringValue(data.ClusterID)
+	state.ClusterName = types.StringValue(data.Name)
+	state.Version = types.StringValue(awx.K8SVersion)
+	state.MasterType = types.StringValue(awx.MasterType)
+	state.MasterCount = types.Int64Value(int64(awx.MasterCount))
+	state.MasterDiskSize = types.Int64Value(int64(awx.MasterDiskSize))
+	state.WorkerType = types.StringValue(awx.WorkerType)
+	state.WorkerDiskSize = types.Int64Value(int64(awx.WorkerDiskSize))
+	state.NetworkID = types.StringValue(data.NetworkID)
+	state.LbSize = types.StringValue(awx.LbSize)
+	// Pod/service networks are stored split into address + prefix on the
+	// wire but as a single CIDR string in the schema.
+	state.PodNetwork = types.StringValue(awx.PodNetwork + "/" + awx.PodPrefix)
+	state.ServiceNetwork = types.StringValue(awx.ServiceNetwork + "/" + awx.ServicePrefix)
+	state.NetworkNodePrefix = types.Int64Value(int64(awx.NetworkNodePrefix))
+	state.MaxPodPerNode = types.Int64Value(int64(awx.K8SMaxPod))
+	state.NfsStatus = types.StringValue(awx.NfsStatus)
+	state.NfsDiskSize = types.Int64Value(int64(awx.NfsDiskSize))
+	state.StoragePolicy = types.StringValue(awx.StorageProfile)
+	state.EdgeID = types.StringValue(edgeId)
+	state.ScaleMin = types.Int64Value(int64(awx.ScaleMinSize))
+	state.ScaleMax = types.Int64Value(int64(awx.ScaleMaxSize))
+	state.NodeDNS = types.StringValue(awx.NodeDNS)
+	state.IPPublicFirewall = types.StringValue(awx.IPPublicFirewall)
+	state.IPPrivateFirewall = types.StringValue(awx.IPPrivateFirewall)
+	state.VpcId = types.StringValue(data.VpcID)
+	//state.CustomScript = awx.CustomScript
+	//state.EnableCustomScript = awx.EnableCustomScript
+	// Region is not part of the cluster payload; derive it from the VPC.
+	region, err := getRegionFromVpcId(r.tenancyApiClient, ctx, vpcId)
+	if err != nil {
+		return nil, err
+	}
+	state.RegionId = types.StringValue(region)
+
+	return &d, nil
+}
+
+// checkForError inspects an API response body that arrived with a successful
+// HTTP status and returns a diagnostic if the payload itself signals an
+// error, either via an "error" field (boolean or otherwise non-nil) or a
+// non-nil "error_code" field. Returns nil when the body looks healthy.
+func checkForError(a []byte) *diag2.ErrorDiagnostic {
+	var body map[string]interface{}
+	if err := json.Unmarshal(a, &body); err != nil {
+		res := diag2.NewErrorDiagnostic("Error unmarshalling response", err.Error())
+		return &res
+	}
+
+	if value, present := body["error"]; present {
+		switch flag := value.(type) {
+		case bool:
+			// A boolean "error": true means failure, false means success.
+			if flag {
+				res := diag2.NewErrorDiagnostic(
+					fmt.Sprintf("Response contained an error field and value was %t", flag),
+					responseBodyPrefix+string(a),
+				)
+				return &res
+			}
+			return nil
+		default:
+			// Any non-boolean, non-null "error" value is treated as failure;
+			// a JSON null falls through to the error_code check.
+			if value != nil {
+				res := diag2.NewErrorDiagnostic("Response contained an error field", responseBodyPrefix+string(a))
+				return &res
+			}
+		}
+	}
+
+	if code, present := body["error_code"]; present && code != nil {
+		res := diag2.NewErrorDiagnostic("Response contained an error code field", responseBodyPrefix+string(a))
+		return &res
+	}
+
+	return nil
+}
+
+// remap copies the Terraform model into the JSON wire format used by the
+// provisioning API, unwrapping each framework value type as it goes.
+func (r *resourceDedicatedKubernetesEngine) remap(src *dedicatedKubernetesEngine, dst *dedicatedKubernetesEngineJson) {
+	dst.ClusterName = src.ClusterName.ValueString()
+	dst.ClusterId = src.ClusterId.ValueString()
+	dst.Id = src.Id.ValueString()
+	dst.Version = src.Version.ValueString()
+	dst.MasterType = src.MasterType.ValueString()
+	dst.MasterCount = src.MasterCount.ValueInt64()
+	dst.MasterDiskSize = src.MasterDiskSize.ValueInt64()
+	dst.WorkerType = src.WorkerType.ValueString()
+	dst.WorkerDiskSize = src.WorkerDiskSize.ValueInt64()
+	dst.NetworkID = src.NetworkID.ValueString()
+	dst.LbSize = src.LbSize.ValueString()
+	dst.PodNetwork = src.PodNetwork.ValueString()
+	dst.ServiceNetwork = src.ServiceNetwork.ValueString()
+	dst.NetworkNodePrefix = src.NetworkNodePrefix.ValueInt64()
+	dst.MaxPodPerNode = src.MaxPodPerNode.ValueInt64()
+	dst.NfsStatus = src.NfsStatus.ValueString()
+	dst.NfsDiskSize = src.NfsDiskSize.ValueInt64()
+	dst.StoragePolicy = src.StoragePolicy.ValueString()
+	dst.EdgeID = src.EdgeID.ValueString()
+	dst.ScaleMin = src.ScaleMin.ValueInt64()
+	dst.ScaleMax = src.ScaleMax.ValueInt64()
+	dst.NodeDNS = src.NodeDNS.ValueString()
+	dst.IPPublicFirewall = src.IPPublicFirewall.ValueString()
+	dst.IPPrivateFirewall = src.IPPrivateFirewall.ValueString()
+	dst.RegionId = src.RegionId.ValueString()
+}
+
+// diff applies each supported in-place change sequentially — master disk,
+// worker disk, master flavor, worker flavor, autoscale bounds, then the
+// Kubernetes version — waiting for the cluster to return to SUCCEEDED after
+// every step. The order is deliberate: each operation must complete before
+// the next one is submitted.
+func (r *resourceDedicatedKubernetesEngine) diff(ctx context.Context, from *dedicatedKubernetesEngine, to *dedicatedKubernetesEngine) *diag2.ErrorDiagnostic {
+	master := from.MasterDiskSize.ValueInt64()
+	master2 := to.MasterDiskSize.ValueInt64()
+	// status: EXTENDING
+
+	if err := r.diskExtend(ctx, from, to, "master", master, master2); err != nil {
+		return err
+	}
+
+	worker := from.WorkerDiskSize.ValueInt64()
+	worker2 := to.WorkerDiskSize.ValueInt64()
+	// status: EXTENDING
+
+	if err := r.diskExtend(ctx, from, to, "worker", worker, worker2); err != nil {
+		return err
+	}
+
+	masterType := from.MasterType.ValueString()
+	master2Type := to.MasterType.ValueString()
+	if err := r.changeFlavor(ctx, from, to, "master", masterType, master2Type); err != nil {
+		return err
+	}
+
+	workerType := from.WorkerType.ValueString()
+	worker2Type := to.WorkerType.ValueString()
+	if err := r.changeFlavor(ctx, from, to, "worker", workerType, worker2Type); err != nil {
+		return err
+	}
+
+	// Autoscale bounds are updated together when either min or max changed.
+	if (from.ScaleMin.ValueInt64() != to.ScaleMin.ValueInt64()) || (from.ScaleMax.ValueInt64() != to.ScaleMax.ValueInt64()) {
+		tflog.Info(ctx, fmt.Sprintf(
+			"Changing autoscale from (%d-%d) to (%d-%d)",
+			from.ScaleMin.ValueInt64(), from.ScaleMax.ValueInt64(),
+			to.ScaleMin.ValueInt64(), to.ScaleMax.ValueInt64(),
+		))
+		autoScale := dedicatedKubernetesEngineAutoscale{
+			ClusterId:   to.clusterUUID(),
+			ScaleMin:    to.ScaleMin.ValueInt64(),
+			ScaleMax:    to.ScaleMax.ValueInt64(),
+			ActionScale: "update",
+		}
+
+		if err := r.manage(from, autoScale, true); err != nil {
+			return err
+		}
+
+		tflog.Info(ctx, fmt.Sprintf(
+			"Changed autoscale to to (%d-%d)",
+			to.ScaleMin.ValueInt64(), to.ScaleMax.ValueInt64(),
+		))
+
+		err := r.waitForSucceeded(ctx, from, 5*time.Minute, false)
+		if err != nil {
+			d := diag2.NewErrorDiagnostic("Error waiting for cluster after updating autoscale to return to SUCCEEDED state", err.Error())
+			return &d
+		}
+	}
+
+	// Version upgrade last: it is the longest-running operation (up to 1h).
+	if err := r.upgradeVersion(ctx, from, to); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// diskExtend grows the disk of the given node group ("master" or "worker")
+// by the delta between toCount and fromCount (GB). Shrinking is rejected.
+// After submitting, it blocks up to 5 minutes until the cluster returns to
+// SUCCEEDED.
+func (r *resourceDedicatedKubernetesEngine) diskExtend(ctx context.Context, from *dedicatedKubernetesEngine, to *dedicatedKubernetesEngine, node string, fromCount, toCount int64) *diag2.ErrorDiagnostic {
+	if fromCount == toCount {
+		return nil
+	}
+
+	if toCount < fromCount {
+		d := diag2.NewErrorDiagnostic(fmt.Sprintf("Wrong %s disk size", node), "Disk cannot be shrunk")
+		return &d
+	}
+
+	tflog.Info(ctx, fmt.Sprintf("Resizing %s from %d to %d", node, fromCount, toCount))
+	// NOTE(review): this sleep presumably lets a preceding operation settle
+	// before the management call — confirm whether it is still needed.
+	time.Sleep(5 * time.Second)
+
+	// The API expects the size delta (not the target size) as a string.
+	management := dedicatedKubernetesEngineManagement{
+		ClusterId:  to.clusterUUID(),
+		MgmtAction: "",
+		DiskExtend: fmt.Sprintf("%d", toCount-fromCount),
+		ExtendType: node,
+		Flavor:     "",
+		NodeType:   "",
+	}
+
+	if err := r.manage(from, management, false); err != nil {
+		return err
+	}
+	tflog.Info(ctx, fmt.Sprintf("Resized disk %s from %d to %d", node, fromCount, toCount))
+
+	err := r.waitForSucceeded(ctx, from, 5*time.Minute, false)
+	if err != nil {
+		d := diag2.NewErrorDiagnostic(
+			fmt.Sprintf("Error waiting for cluster after resizing %s disk to return to SUCCEEDED state", node),
+			err.Error(),
+		)
+		return &d
+	}
+
+	return nil
+}
+
+// upgradeVersion bumps the cluster's Kubernetes version when the plan differs
+// from state, normalising the version to a leading "v", then waits up to one
+// hour for the upgrade to finish.
+func (r *resourceDedicatedKubernetesEngine) upgradeVersion(ctx context.Context, from *dedicatedKubernetesEngine, to *dedicatedKubernetesEngine) *diag2.ErrorDiagnostic {
+	if from.Version.ValueString() != to.Version.ValueString() {
+		// version changed, call bump version
+		path := commons.ApiPath.DedicatedFKEUpgradeVersion(from.vpcId(), from.Id.ValueString())
+
+		version := to.Version.ValueString()
+		if !strings.HasPrefix(version, "v") {
+			version = "v" + version
+		}
+		change := dedicatedKubernetesEngineUpgradeVersion{
+			VersionUpgrade: version,
+			ClusterId:      from.clusterUUID(),
+		}
+
+		// NOTE(review): to.Version is a types.String, so %s logs its quoted
+		// form ("v1.x") — consider logging the unwrapped `version` instead.
+		tflog.Info(ctx, fmt.Sprintf("Bumping version to %s", to.Version))
+
+		a, err2 := r.client.SendPostRequest(path, change)
+		if err2 != nil {
+			d := diag2.NewErrorDiagnostic("Error calling upgrade version API", err2.Error())
+			return &d
+		}
+
+		if diagErr2 := checkForError(a); diagErr2 != nil {
+			return diagErr2
+		}
+
+		err := r.waitForSucceeded(ctx, from, 1*time.Hour, false)
+		if err != nil {
+			d := diag2.NewErrorDiagnostic("Error waiting for cluster after upgrading to return to SUCCEEDED state", err.Error())
+			return &d
+		}
+	}
+
+	return nil
+}
+
+// changeFlavor switches the flavor of the given node group ("master" or
+// "worker") when it differs, then waits up to 20 minutes for the cluster to
+// return to SUCCEEDED.
+func (r *resourceDedicatedKubernetesEngine) changeFlavor(ctx context.Context, from *dedicatedKubernetesEngine, _ *dedicatedKubernetesEngine, node string, fromFlavor, toFlavor string) *diag2.ErrorDiagnostic {
+
+	if fromFlavor != toFlavor {
+		tflog.Info(ctx, fmt.Sprintf("Changing %s from %s to %s", node, fromFlavor, toFlavor))
+
+		management := dedicatedKubernetesEngineManagement{
+			ClusterId:  from.clusterUUID(),
+			MgmtAction: "",
+			DiskExtend: "0",
+			ExtendType: "",
+			Flavor:     toFlavor,
+			NodeType:   node,
+		}
+
+		if err := r.manage(from, management, false); err != nil {
+			return err
+		}
+		tflog.Info(ctx, fmt.Sprintf("Changed %s from %s to %s", node, fromFlavor, toFlavor))
+
+		err := r.waitForSucceeded(ctx, from, 20*time.Minute, false)
+		if err != nil {
+			d := diag2.NewErrorDiagnostic(fmt.Sprintf("Error waiting for cluster after changing %s type to return to SUCCEEDED state", node), err.Error())
+			return &d
+		}
+	}
+
+	return nil
+}
+
+// manage posts a management request for the cluster. Autoscale changes use a
+// dedicated endpoint; every other action goes through the management route.
+func (r *resourceDedicatedKubernetesEngine) manage(state *dedicatedKubernetesEngine, params interface{}, isAutoScale bool) *diag2.ErrorDiagnostic {
+	path := commons.ApiPath.DedicatedFKEManagement(state.vpcId(), state.clusterUUID())
+	action := "management"
+	if isAutoScale {
+		path = fmt.Sprintf("/v1/xplat/fke/vpc/%s/cluster/%s/auto-scale", state.vpcId(), state.clusterUUID())
+		action = "autoscale"
+	}
+
+	a, err2 := r.client.SendPostRequest(path, params)
+	if err2 != nil {
+		// Bug fix: the diagnostic previously always said "autoscale API"
+		// even for disk-extend and flavor-change management calls.
+		d := diag2.NewErrorDiagnostic("Error calling "+action+" API", err2.Error())
+		return &d
+	}
+
+	if diagErr2 := checkForError(a); diagErr2 != nil {
+		return diagErr2
+	}
+
+	return nil
+}
+
+// waitForSucceeded polls the cluster every 5 seconds until its status becomes
+// SUCCEEDED (nil), ERROR/STOPPED (error), or the timeout elapses. Each tick
+// retries a failed read up to 5 times with exponential backoff capped at 30s;
+// when ignoreError is true, read failures are swallowed and polling continues
+// (useful right after provisioning, when the cluster may not be readable yet).
+func (r *resourceDedicatedKubernetesEngine) waitForSucceeded(ctx context.Context, state *dedicatedKubernetesEngine, timeout time.Duration, ignoreError bool) error {
+	clusterId := state.clusterUUID()
+	durationText := fmt.Sprintf("%v", timeout)
+	tflog.Info(ctx, "Waiting for cluster "+clusterId+" to succeed, duration "+durationText)
+
+	ticker := time.NewTicker(5 * time.Second)
+	defer ticker.Stop()
+	to := time.NewTimer(timeout)
+	defer to.Stop()
+
+	for {
+		select {
+		case <-to.C:
+			return errors.New("Timed out waiting for cluster " + clusterId + " to return to success state")
+		case <-ticker.C:
+			{
+				var e error
+				tflog.Info(ctx, "Checking status of cluster "+clusterId)
+
+				localTimeout := 200 * time.Millisecond
+
+				for i := 0; i < 5; i++ {
+					// Read into a throwaway model so polling never mutates
+					// the caller's state.
+					status, err := r.internalRead(ctx, clusterId, &dedicatedKubernetesEngine{
+						ClusterId: state.ClusterId,
+						VpcId:     state.VpcId,
+					})
+					e = err
+
+					if err != nil {
+						time.Sleep(localTimeout)
+						localTimeout *= 2
+						localTimeout = min(localTimeout, 30*time.Second)
+
+						continue
+					}
+
+					// Note: this local `state` (the status string) shadows
+					// the outer *dedicatedKubernetesEngine parameter.
+					state := status.Cluster.Status
+					tflog.Info(ctx, "Status of cluster "+clusterId+" is currently "+state)
+					switch state {
+					case "SUCCEEDED":
+						{
+							return nil
+						}
+					case "ERROR":
+						{
+							return errors.New("cluster in error state")
+						}
+
+					case "STOPPED":
+						{
+							return errors.New("cluster is stopped")
+						}
+					}
+
+				}
+				if e != nil && !ignoreError {
+					return e
+				}
+			}
+		}
+	}
+}
+
+// vpcId returns the unwrapped VPC ID string from the model.
+func (e *dedicatedKubernetesEngine) vpcId() string {
+	return e.VpcId.ValueString()
+}
+// clusterUUID returns the unwrapped cluster UUID string from the model.
+func (e *dedicatedKubernetesEngine) clusterUUID() string {
+	return e.Id.ValueString()
+}
+// getRegionFromVpcId finds the region abbreviation a VPC belongs to by
+// scanning every tenant and region visible to the authenticated user. It
+// returns as soon as the VPC is located instead of exhaustively listing every
+// VPC first, avoiding needless API calls.
+func getRegionFromVpcId(client *TenancyApiClient, ctx context.Context, vpcId string) (string, error) {
+	t, err := client.GetTenancy(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	user := t.UserId
+
+	for _, tenant := range t.Tenants {
+		regions, err := client.GetRegions(ctx, tenant.Id)
+		if err != nil {
+			return "", err
+		}
+
+		for _, region := range regions {
+			// Explicit element type documents what ListVpcs returns.
+			var vpcs []fptcloud_vpc.VPC
+			vpcs, err = client.ListVpcs(ctx, tenant.Id, user, region.Id)
+			if err != nil {
+				return "", err
+			}
+
+			for _, vpc := range vpcs {
+				if vpc.Id == vpcId {
+					return region.Abbr, nil
+				}
+			}
+		}
+	}
+
+	return "", errors.New("no VPC found under this account with vpcId " + vpcId)
+}
+
+// dedicatedKubernetesEngine is the Terraform plan/state model; tfsdk tags
+// must match the attribute names declared in Schema. The json tags on some
+// fields are not used for wire serialisation (dedicatedKubernetesEngineJson
+// handles that via remap).
+type dedicatedKubernetesEngine struct {
+	ClusterName types.String `tfsdk:"cluster_name" json:"cluster_name"`
+	ClusterId   types.String `tfsdk:"cluster_id" json:"cluster_id,omitempty"`
+	Id          types.String `tfsdk:"id" json:"id"`
+	Version     types.String `tfsdk:"k8s_version" json:"k8s_version"`
+	MasterType  types.String `tfsdk:"master_type"` // tfsdk:"master_type"
+	MasterCount types.Int64  `tfsdk:"master_count" json:"master_count"`
+	MasterDiskSize types.Int64 `tfsdk:"master_disk_size" json:"master_disk_size"`
+	WorkerType  types.String `tfsdk:"worker_type" json:"worker_type"`
+	WorkerDiskSize types.Int64 `tfsdk:"worker_disk_size" json:"worker_disk_size"`
+	NetworkID   types.String `tfsdk:"network_id" json:"network_id"`
+	LbSize      types.String `tfsdk:"lb_size" json:"lb_size"`
+
+	// CIDR strings (address/prefix) — split into parts on the wire.
+	PodNetwork     types.String `tfsdk:"pod_network" json:"pod_network"`
+	ServiceNetwork types.String `tfsdk:"service_network" json:"service_network"`
+
+	NetworkNodePrefix types.Int64 `tfsdk:"network_node_prefix" json:"network_node_prefix"`
+
+	MaxPodPerNode types.Int64  `tfsdk:"max_pod_per_node" json:"max_pod_per_node"`
+	NfsStatus     types.String `tfsdk:"nfs_status" json:"nfs_status"`
+	NfsDiskSize   types.Int64  `tfsdk:"nfs_disk_size" json:"nfs_disk_size"`
+
+	StoragePolicy types.String `tfsdk:"storage_policy" json:"storage_policy"`
+	EdgeID        types.String `tfsdk:"edge_id"`
+
+	ScaleMin types.Int64 `tfsdk:"scale_min" json:"scale_min"`
+	ScaleMax types.Int64 `tfsdk:"scale_max" json:"scale_max"`
+
+	NodeDNS           types.String `tfsdk:"node_dns" json:"node_dns"`
+	IPPublicFirewall  types.String `tfsdk:"ip_public_firewall" json:"ip_public_firewall"`
+	IPPrivateFirewall types.String `tfsdk:"ip_private_firewall" json:"ip_private_firewall"`
+	VpcId             types.String `tfsdk:"vpc_id" json:"vpc_id"`
+	RegionId          types.String `tfsdk:"region_id"`
+}
+
+// dedicatedKubernetesEngineJson is the request payload for cluster creation;
+// populated from the Terraform model by remap. Fields absent from the schema
+// (CustomScript, PublicKey, UpstreamDNS, ...) are zeroed explicitly in Create.
+type dedicatedKubernetesEngineJson struct {
+	ClusterName    string `json:"cluster_name"`
+	ClusterId      string `json:"cluster_id,omitempty"`
+	Id             string `json:"id,omitempty"`
+	Version        string `json:"k8s_version"`
+	IpPublic       string `json:"ip_public"`
+	MasterType     string `json:"master_type"`
+	MasterCount    int64  `json:"master_count"`
+	MasterDiskSize int64  `json:"master_disk_size"`
+	WorkerType     string `json:"worker_type"`
+	WorkerDiskSize int64  `json:"worker_disk_size"`
+	NetworkID      string `json:"network_id"`
+	LbSize         string `json:"lb_size"`
+
+	PodNetwork     string `json:"pod_network"`
+	ServiceNetwork string `json:"service_network"`
+
+	NetworkNodePrefix int64 `json:"network_node_prefix"`
+
+	MaxPodPerNode int64  `json:"max_pod_per_node"`
+	NfsStatus     string `json:"nfs_status"`
+	NfsDiskSize   int64  `json:"nfs_disk_size"`
+
+	StoragePolicy string `json:"storage_policy"`
+	EdgeID        string `json:"edge_id"`
+
+	ScaleMin int64 `json:"scale_min"`
+	ScaleMax int64 `json:"scale_max"`
+
+	NodeDNS           string `json:"node_dns"`
+	IPPublicFirewall  string `json:"ip_public_firewall"`
+	IPPrivateFirewall string `json:"ip_private_firewall"`
+
+	CustomScript       string `json:"custom_script"`
+	EnableCustomScript bool   `json:"enable_custom_script"`
+	PublicKey          string `json:"public_key"`
+	UpstreamDNS        string `json:"upstream_dns"`
+	RegionId           string `json:"region_id"`
+}
+
+// dedicatedKubernetesEngineData is the cluster object as returned by the API.
+// AwxParams carries a nested JSON document (see dedicatedKubernetesEngineParams)
+// that must be unmarshalled separately.
+type dedicatedKubernetesEngineData struct {
+	ID          string `json:"id"`
+	ClusterID   string `json:"cluster_id"`
+	VpcID       string `json:"vpc_id"`
+	EdgeID      string `json:"edge_id"`
+	Name        string `json:"name"`
+	AwxParams   string `json:"aws_params"`
+	Status      string `json:"status"`
+	NetworkID   string `json:"network_id"`
+	NfsDiskSize int    `json:"nfs_disk_size"`
+	NfsStatus   string `json:"nfs_status"`
+	ErrorMessage interface{} `json:"error_message"`
+	IsRunning   bool   `json:"is_running"`
+	AutoScale   string `json:"auto_scale"`
+	ScaleMin    int    `json:"scale_min"`
+	ScaleMax    int    `json:"scale_max"`
+	Templates   string `json:"templates"`
+	NetworkName string `json:"network_name"`
+}
+
+// dedicatedKubernetesEngineParams models the JSON document embedded as a
+// string in dedicatedKubernetesEngineData.AwxParams; it holds the detailed
+// provisioning settings that internalRead maps back into Terraform state.
+type dedicatedKubernetesEngineParams struct {
+	VcdURL               string      `json:"vcd_url"`
+	PublicDomain         string      `json:"public_domain"`
+	ClusterID            string      `json:"cluster_id"`
+	ClusterName          string      `json:"cluster_name"`
+	OrgName              string      `json:"org_name"`
+	VdcName              string      `json:"vdc_name"`
+	MasterType           string      `json:"master_type"`
+	MasterOs             string      `json:"master_os"`
+	MasterCPU            int         `json:"master_cpu"`
+	MasterRAM            int         `json:"master_ram"`
+	MasterCount          int         `json:"master_count"`
+	MasterDiskSize       int         `json:"master_disk_size"`
+	WorkerOs             string      `json:"worker_os"`
+	WorkerCPU            int         `json:"worker_cpu"`
+	WorkerRAM            int         `json:"worker_ram"`
+	WorkerCount          int         `json:"worker_count"`
+	WorkerType           string      `json:"worker_type"`
+	WorkerDiskSize       int         `json:"worker_disk_size"`
+	VMPass               string      `json:"vm_pass"`
+	StorageProfile       string      `json:"storage_profile"`
+	VMNetwork            string      `json:"vm_network"`
+	EdgeGatewayID        string      `json:"edge_gateway_id"`
+	K8SVersion           string      `json:"k8s_version"`
+	PodNetwork           string      `json:"pod_network"`
+	PodPrefix            string      `json:"pod_prefix"`
+	ServiceNetwork       string      `json:"service_network"`
+	ServicePrefix        string      `json:"service_prefix"`
+	NetworkNodePrefix    int         `json:"network_node_prefix"`
+	K8SMaxPod            int         `json:"k8s_max_pod"`
+	IPPublic             string      `json:"ip_public"`
+	IDServiceEngineGroup string      `json:"id_service_engine_group"`
+	VirtualIPAddress     interface{} `json:"virtual_ip_address"`
+	NfsStatus            string      `json:"nfs_status"`
+	NfsDiskSize          int         `json:"nfs_disk_size"`
+	LbSize               string      `json:"lb_size"`
+	DashboardLink        string      `json:"dashboard_link"`
+	APILink              string      `json:"api_link"`
+	UserName             string      `json:"user_name"`
+	AwxJobType           string      `json:"awx_job_type"`
+	AutoScaleStatus      string      `json:"auto_scale_status"`
+	ScaleMinSize         int         `json:"scale_min_size"`
+	ScaleMaxSize         int         `json:"scale_max_size"`
+	VpcID                string      `json:"vpc_id"`
+	NodeDNS              string      `json:"node_dns"`
+	CallbackURL          string      `json:"callback_url"`
+	CallbackAction       string      `json:"callback_action"`
+	AccessToken          string      `json:"access_token"`
+	IPPublicFirewall     string      `json:"ip_public_firewall"`
+	IPPrivateFirewall    string      `json:"ip_private_firewall"`
+	CustomScript         string      `json:"custom_script"`
+	EnableCustomScript   bool        `json:"enable_custom_script"`
+	VcdProvider          string      `json:"vcd_provider"`
+	VcdPod               string      `json:"vcd_pod"`
+	RequestUserID        string      `json:"request_user_id"`
+}
+
+// dedicatedKubernetesEngineCreateResponse is the envelope returned by the
+// cluster-create endpoint.
+type dedicatedKubernetesEngineCreateResponse struct {
+	Cluster dedicatedKubernetesEngineData `json:"cluster"`
+}
+
+// dedicatedKubernetesEngineReadResponse is the envelope returned by the
+// cluster-detail endpoint.
+type dedicatedKubernetesEngineReadResponse struct {
+	Cluster dedicatedKubernetesEngineData `json:"cluster"`
+}
+
+// dedicatedKubernetesEngineUpgradeVersion is the payload for the version
+// upgrade endpoint; VersionUpgrade must carry a leading "v".
+type dedicatedKubernetesEngineUpgradeVersion struct {
+	ClusterId      string `json:"cluster_id"`
+	VersionUpgrade string `json:"version_upgrade"`
+}
+
+// dedicatedKubernetesEngineManagement is the payload for disk-extend and
+// flavor-change management actions; unused fields are sent empty.
+type dedicatedKubernetesEngineManagement struct {
+	ClusterId  string `json:"cluster_id"`
+	MgmtAction string `json:"mgmt_action"`
+	DiskExtend string `json:"disk_extend"`
+	ExtendType string `json:"extend_type"`
+	Flavor     string `json:"flavor"`
+	NodeType   string `json:"node_type"`
+}
+
+// dedicatedKubernetesEngineAutoscale is the payload for the auto-scale
+// endpoint; ActionScale is "update" when changing bounds.
+type dedicatedKubernetesEngineAutoscale struct {
+	ClusterId   string `json:"cluster_id"`
+	ScaleMin    int64  `json:"scale_min"`
+	ScaleMax    int64  `json:"scale_max"`
+	ActionScale string `json:"action_scale"`
+}
diff --git a/fptcloud/dfke/resource_dfke_state.go b/fptcloud/dfke/resource_dfke_state.go
new file mode 100644
index 0000000..f161db5
--- /dev/null
+++ b/fptcloud/dfke/resource_dfke_state.go
@@ -0,0 +1,212 @@
+package fptcloud_dfke
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ diag2 "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "terraform-provider-fptcloud/commons"
+)
+
+// Compile-time checks that resourceDedicatedKubernetesEngineState implements
+// the plugin-framework interfaces it is registered for.
+var (
+ _ resource.Resource = &resourceDedicatedKubernetesEngineState{}
+ _ resource.ResourceWithConfigure = &resourceDedicatedKubernetesEngineState{}
+ _ resource.ResourceWithImportState = &resourceDedicatedKubernetesEngineState{}
+)
+
+// resourceDedicatedKubernetesEngineState manages the running/stopped state of
+// an existing dedicated FKE cluster; the API client is injected in Configure.
+type resourceDedicatedKubernetesEngineState struct {
+ client *commons.Client
+}
+
+// NewResourceDedicatedKubernetesEngineState returns a new, unconfigured
+// instance of the state resource.
+func NewResourceDedicatedKubernetesEngineState() resource.Resource {
+ return &resourceDedicatedKubernetesEngineState{}
+}
+
+func (r *resourceDedicatedKubernetesEngineState) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) {
+ tflog.Info(ctx, "Importing state for DFKE cluster ID "+request.ID)
+
+ var state dedicatedKubernetesEngineState
+ state.Id = types.StringValue(request.ID)
+
+ state.VpcId = types.StringValue("188af427-269b-418a-90bb-0cb27afc6c1e")
+
+ err := r.internalRead(ctx, request.ID, &state)
+ if err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error calling API", err.Error()))
+ return
+ }
+
+ diags := response.State.Set(ctx, &state)
+ response.Diagnostics.Append(diags...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+
+ // lack of ability to import without VPC ID
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic("Unimplemented", "Importing DFKE clusters isn't currently supported"))
+}
+
+// Metadata sets the resource type name:
+// "<provider>_dedicated_kubernetes_engine_v1_state".
+func (r *resourceDedicatedKubernetesEngineState) Metadata(ctx context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) {
+ response.TypeName = request.ProviderTypeName + "_dedicated_kubernetes_engine_v1_state"
+}
+
+// Schema declares the state resource attributes. Every attribute carries a
+// RequiresReplace plan modifier, so any change forces re-creation of this
+// resource rather than an in-place update.
+func (r *resourceDedicatedKubernetesEngineState) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) {
+ response.Schema = schema.Schema{
+ Description: "Manage dedicated FKE cluster state",
+ Attributes: map[string]schema.Attribute{
+ // Cluster ID whose power state is being managed.
+ "id": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ },
+ "vpc_id": schema.StringAttribute{
+ Required: true,
+ PlanModifiers: forceNewPlanModifiersString,
+ },
+ // Desired running state of the cluster.
+ "is_running": schema.BoolAttribute{
+ Required: true,
+ PlanModifiers: []planmodifier.Bool{
+ boolplanmodifier.RequiresReplace(),
+ },
+ },
+ },
+ }
+}
+
+// Create does not provision anything; it verifies the declared power state
+// against the live cluster and records the result in Terraform state.
+func (r *resourceDedicatedKubernetesEngineState) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) {
+ var plan dedicatedKubernetesEngineState
+ response.Diagnostics.Append(request.Plan.Get(ctx, &plan)...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+
+ if err := r.internalRead(ctx, plan.Id.ValueString(), &plan); err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error reading cluster state", err.Error()))
+ return
+ }
+
+ response.Diagnostics.Append(response.State.Set(ctx, &plan)...)
+}
+
+// Read refreshes is_running from the live cluster, keeping id and vpc_id
+// from the prior state.
+func (r *resourceDedicatedKubernetesEngineState) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) {
+ var current dedicatedKubernetesEngineState
+ response.Diagnostics.Append(request.State.Get(ctx, &current)...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+
+ if err := r.internalRead(ctx, current.Id.ValueString(), &current); err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error reading cluster state", err.Error()))
+ return
+ }
+
+ response.Diagnostics.Append(response.State.Set(ctx, &current)...)
+}
+
+// Update pushes a power-state change to the cluster management endpoint.
+//
+// NOTE(review): the values are read from request.State (the prior state)
+// rather than request.Plan, and mgmt_action is hard-coded to "stop". Since
+// every attribute (including is_running) carries RequiresReplace, this
+// method may be unreachable in practice — confirm intent.
+func (r *resourceDedicatedKubernetesEngineState) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) {
+ var state dedicatedKubernetesEngineState
+ diags := request.State.Get(ctx, &state)
+
+ response.Diagnostics.Append(diags...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+
+ // Only the cluster ID and the "stop" action are populated; the remaining
+ // management fields are sent as empty/zero values.
+ management := dedicatedKubernetesEngineManagement{
+ ClusterId: state.Id.ValueString(),
+ MgmtAction: "stop",
+ DiskExtend: "0",
+ ExtendType: "",
+ Flavor: "",
+ NodeType: "",
+ }
+
+ path := commons.ApiPath.DedicatedFKEManagement(state.VpcId.ValueString(), state.Id.ValueString())
+
+ a, err2 := r.client.SendPostRequest(path, management)
+ if err2 != nil {
+ d := diag2.NewErrorDiagnostic("Error calling management API", err2.Error())
+ response.Diagnostics.Append(d)
+ return
+ }
+
+ // checkForError (defined elsewhere in this package) inspects the raw
+ // response body for an in-band API error.
+ if diagErr2 := checkForError(a); diagErr2 != nil {
+ response.Diagnostics.Append(diagErr2)
+ return
+ }
+}
+
+// Delete always fails: the power state of a cluster is not a deletable object.
+func (r *resourceDedicatedKubernetesEngineState) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic("Unsupported operation", "Deleting state of a cluster isn't supported"))
+}
+
+// Configure stores the provider-supplied API client on the resource. A nil
+// ProviderData (provider not configured yet) is silently ignored.
+func (r *resourceDedicatedKubernetesEngineState) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) {
+ if request.ProviderData == nil {
+ return
+ }
+
+ if client, ok := request.ProviderData.(*commons.Client); ok {
+ r.client = client
+ return
+ }
+
+ response.Diagnostics.AddError(
+ "Unexpected Resource Configure Type",
+ fmt.Sprintf("Expected *commons.Client, got: %T. Please report this issue to the provider developers.", request.ProviderData),
+ )
+}
+
+// internalRead fetches the cluster and updates state.IsRunning, first
+// checking that the reported status is consistent with the running flag
+// (STOPPED when not running, SUCCEEDED when running).
+func (r *resourceDedicatedKubernetesEngineState) internalRead(ctx context.Context, clusterId string, state *dedicatedKubernetesEngineState) error {
+ vpcId := state.VpcId.ValueString()
+ tflog.Info(ctx, "Reading state of cluster ID "+clusterId+", VPC ID "+vpcId)
+
+ res, err := r.client.SendGetRequest(commons.ApiPath.DedicatedFKEGet(vpcId, clusterId))
+ if err != nil {
+ return err
+ }
+
+ var parsed dedicatedKubernetesEngineReadResponse
+ if err = json.Unmarshal(res, &parsed); err != nil {
+ return err
+ }
+
+ cluster := parsed.Cluster
+ switch {
+ case !cluster.IsRunning && cluster.Status != "STOPPED":
+ return errors.New("cluster is not running, but status is " + cluster.Status + " instead of STOPPED")
+ case cluster.IsRunning && cluster.Status != "SUCCEEDED":
+ return errors.New("cluster is running, but status is " + cluster.Status + " instead of SUCCEEDED")
+ }
+
+ state.IsRunning = types.BoolValue(cluster.IsRunning)
+ return nil
+}
+
+// dedicatedKubernetesEngineState mirrors the Terraform schema of the state
+// resource.
+type dedicatedKubernetesEngineState struct {
+ Id types.String `tfsdk:"id"`
+ VpcId types.String `tfsdk:"vpc_id"`
+ IsRunning types.Bool `tfsdk:"is_running"`
+}
diff --git a/fptcloud/dfke/tenancy_service.go b/fptcloud/dfke/tenancy_service.go
new file mode 100644
index 0000000..54ccfce
--- /dev/null
+++ b/fptcloud/dfke/tenancy_service.go
@@ -0,0 +1,124 @@
+package fptcloud_dfke
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "strings"
+ "terraform-provider-fptcloud/commons"
+ fptcloud_vpc "terraform-provider-fptcloud/fptcloud/vpc"
+)
+
+// TenancyApiClient wraps commons.Client with tenancy/organization endpoints
+// used to resolve users, regions, VPCs and platforms.
+type TenancyApiClient struct {
+ *commons.Client
+}
+
+// NewTenancyApiClient wraps an existing API client.
+func NewTenancyApiClient(c *commons.Client) *TenancyApiClient {
+ return &TenancyApiClient{c}
+}
+
+func (t *TenancyApiClient) GetTenancy(ctx context.Context) (*EnabledTenants, error) {
+ tflog.Info(ctx, "Getting enabled tenants")
+
+ path := "/v1/vmware/user/tenants/enabled"
+ res, err := t.SendGetRequest(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var ret EnabledTenants
+ err = json.Unmarshal(res, &ret)
+ if err != nil {
+ return nil, err
+ }
+ return &ret, nil
+}
+
+// GetRegions lists the regions available under the given tenant.
+func (t *TenancyApiClient) GetRegions(ctx context.Context, tenantId string) ([]Region, error) {
+ tflog.Info(ctx, "Getting regions under tenant "+tenantId)
+
+ res, err := t.SendGetRequest(fmt.Sprintf("/v1/vmware/org/%s/list/regions", tenantId))
+ if err != nil {
+ return nil, err
+ }
+
+ var parsed RegionResponse
+ if err = json.Unmarshal(res, &parsed); err != nil {
+ return nil, err
+ }
+
+ return parsed.Regions, nil
+}
+
+// ListVpcs lists the VPCs visible to the given user under a tenant, filtered
+// by region ID.
+func (t *TenancyApiClient) ListVpcs(ctx context.Context, tenantId string, userId string, region string) ([]fptcloud_vpc.VPC, error) {
+ // Log message fixed: this call lists VPCs, not regions (the old text was
+ // copy-pasted from GetRegions).
+ tflog.Info(ctx, "Getting VPCs under tenant "+tenantId+", user "+userId+", region "+region)
+
+ path := fmt.Sprintf("/v1/vmware/org/%s/user/%s/list/vpc?regionId=%s", tenantId, userId, region)
+ res, err := t.SendGetRequest(path)
+ if err != nil {
+ return nil, err
+ }
+
+ var ret ListVpcResponse
+ if err = json.Unmarshal(res, &ret); err != nil {
+ return nil, err
+ }
+
+ return ret.VpcList, nil
+}
+
+// GetVpcPlatform resolves the platform of a VPC for the current user,
+// normalized to upper case.
+func (t *TenancyApiClient) GetVpcPlatform(ctx context.Context, vpcId string) (string, error) {
+ // The vpc_user endpoint is keyed by user ID, so resolve tenancy first.
+ tenants, err := t.GetTenancy(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ tflog.Info(ctx, "Getting platform for VPC "+vpcId)
+
+ res, err := t.SendGetRequest(fmt.Sprintf("/v1/vmware/vpc/%s/user/%s/vpc_user", vpcId, tenants.UserId))
+ if err != nil {
+ return "", err
+ }
+
+ var parsed vpcUserResponse
+ if err = json.Unmarshal(res, &parsed); err != nil {
+ return "", err
+ }
+
+ tflog.Info(ctx, "Platform for VPC "+vpcId+" is "+parsed.User.Platform)
+
+ return strings.ToUpper(parsed.User.Platform), nil
+}
+
+// vpcUserResponse is the envelope returned by the vpc_user endpoint.
+type vpcUserResponse struct {
+ User vpcUser `json:"data"`
+}
+
+// vpcUser associates a user with a VPC and its underlying platform.
+type vpcUser struct {
+ UserId string `json:"user_id"`
+ VpcId string `json:"vpc_id"`
+ Platform string `json:"platform"`
+}
+
+// EnabledTenants is the current user's ID and the tenants enabled for them.
+type EnabledTenants struct {
+ UserId string `json:"id"`
+ Tenants []fptcloud_vpc.Tenant `json:"tenants"`
+}
+
+// Region is a single region entry returned by the list/regions endpoint.
+type Region struct {
+ Id string `json:"id"`
+ Abbr string `json:"abbreviation_name"`
+}
+
+// RegionResponse is the list envelope for regions.
+type RegionResponse struct {
+ Regions []Region `json:"data"`
+}
+
+// ListVpcResponse is the list envelope for VPCs.
+type ListVpcResponse struct {
+ VpcList []fptcloud_vpc.VPC `json:"data"`
+}
diff --git a/fptcloud/edge_gateway/datasource_edge_gateway.go b/fptcloud/edge_gateway/datasource_edge_gateway.go
new file mode 100644
index 0000000..0f6f367
--- /dev/null
+++ b/fptcloud/edge_gateway/datasource_edge_gateway.go
@@ -0,0 +1,148 @@
+package fptcloud_edge_gateway
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ diag2 "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ common "terraform-provider-fptcloud/commons"
+)
+
+// Compile-time checks that datasourceEdgeGateway implements the data-source
+// interfaces it is registered for.
+var (
+ _ datasource.DataSource = &datasourceEdgeGateway{}
+ _ datasource.DataSourceWithConfigure = &datasourceEdgeGateway{}
+)
+
+// datasourceEdgeGateway looks up edge gateways by name within a VPC; the API
+// client is injected in Configure.
+type datasourceEdgeGateway struct {
+ client *common.Client
+}
+
+// NewDataSourceEdgeGateway returns a new, unconfigured instance of the
+// edge gateway data source.
+func NewDataSourceEdgeGateway() datasource.DataSource {
+ return &datasourceEdgeGateway{}
+}
+
+// Metadata sets the data source type name: "<provider>_edge_gateway".
+func (d *datasourceEdgeGateway) Metadata(ctx context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) {
+ response.TypeName = request.ProviderTypeName + "_edge_gateway"
+}
+
+// Schema declares the data source attributes: lookup keys are name and
+// vpc_id; id and edge_gateway_id are computed from the API response.
+func (d *datasourceEdgeGateway) Schema(ctx context.Context, request datasource.SchemaRequest, response *datasource.SchemaResponse) {
+ response.Schema = schema.Schema{
+ Description: "Retrieves information about FPT Cloud edge gateway",
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ Description: "Identifier of the edge_gateway",
+ },
+ "name": schema.StringAttribute{
+ Required: true,
+ Description: "Name of the edge gateway",
+ },
+ "edge_gateway_id": schema.StringAttribute{
+ Computed: true,
+ Description: "Edge gateway id",
+ },
+ "vpc_id": schema.StringAttribute{
+ Required: true,
+ Description: "VPC id",
+ },
+ },
+ }
+}
+
+// Read lists every edge gateway in the configured VPC, selects the one whose
+// name matches, and populates the state with its identifiers.
+func (d *datasourceEdgeGateway) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) {
+ var state edge_gateway
+ response.Diagnostics.Append(request.Config.Get(ctx, &state)...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+
+ list, err := d.internalRead(ctx, &state)
+ if err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error getting edge_gateway list", err.Error()))
+ return
+ }
+
+ wanted := state.Name.ValueString()
+ var match edgeGatewayData
+ for _, candidate := range *list {
+ if candidate.Name == wanted {
+ match = candidate
+ break
+ }
+ }
+
+ // A zero-value (empty) ID means no gateway with the requested name exists.
+ if match.Id == "" {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic(
+ "No such edge_gateway",
+ fmt.Sprintf("No matching edge_gateway with name %s was found", wanted),
+ ))
+ return
+ }
+
+ state.Id = types.StringValue(match.Id)
+ state.EdgeGatewayId = types.StringValue(match.EdgeGatewayId)
+ state.VpcId = types.StringValue(match.VpcId)
+ state.Name = types.StringValue(match.Name)
+
+ response.Diagnostics.Append(response.State.Set(ctx, &state)...)
+}
+
+// Configure stores the provider-configured API client on the data source.
+func (d *datasourceEdgeGateway) Configure(ctx context.Context, request datasource.ConfigureRequest, response *datasource.ConfigureResponse) {
+ if request.ProviderData == nil {
+ return
+ }
+
+ client, ok := request.ProviderData.(*common.Client)
+ if !ok {
+ response.Diagnostics.AddError(
+ "Unexpected Data Source Configure Type",
+ // The type named in the message now matches the assertion above
+ // (it previously claimed *internal.ClientV1).
+ fmt.Sprintf("Expected *common.Client, got: %T. Please report this issue to the provider developers.", request.ProviderData),
+ )
+
+ return
+ }
+
+ d.client = client
+}
+
+// internalRead returns every edge gateway in the VPC referenced by state.
+func (d *datasourceEdgeGateway) internalRead(_ context.Context, state *edge_gateway) (*[]edgeGatewayData, error) {
+ res, err := d.client.SendGetRequest(common.ApiPath.EdgeGatewayList(state.VpcId.ValueString()))
+ if err != nil {
+ return nil, err
+ }
+
+ var parsed edgeGatewayResponse
+ if err = json.Unmarshal(res, &parsed); err != nil {
+ return nil, err
+ }
+
+ return &parsed.Data, nil
+}
+
+// edge_gateway mirrors the Terraform schema of the data source.
+type edge_gateway struct {
+ Id types.String `tfsdk:"id"`
+ Name types.String `tfsdk:"name"`
+ EdgeGatewayId types.String `tfsdk:"edge_gateway_id"`
+ VpcId types.String `tfsdk:"vpc_id"`
+}
+
+// edgeGatewayData is a single edge gateway entry as returned by the API.
+type edgeGatewayData struct {
+ Id string `json:"id"`
+ Name string `json:"name"`
+ EdgeGatewayId string `json:"edge_gateway_id"`
+ VpcId string `json:"vpc_id"`
+}
+
+// edgeGatewayResponse is the list envelope returned by the edge gateway
+// list endpoint.
+type edgeGatewayResponse struct {
+ Data []edgeGatewayData `json:"data"`
+}
diff --git a/fptcloud/mfke/datasource_mfke.go b/fptcloud/mfke/datasource_mfke.go
new file mode 100644
index 0000000..1ad8bba
--- /dev/null
+++ b/fptcloud/mfke/datasource_mfke.go
@@ -0,0 +1,297 @@
+package fptcloud_mfke
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+ diag2 "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "strings"
+ "terraform-provider-fptcloud/commons"
+ fptcloud_dfke "terraform-provider-fptcloud/fptcloud/dfke"
+ fptcloud_subnet "terraform-provider-fptcloud/fptcloud/subnet"
+)
+
+// Compile-time checks that datasourceManagedKubernetesEngine implements the
+// data-source interfaces it is registered for.
+var (
+ _ datasource.DataSource = &datasourceManagedKubernetesEngine{}
+ _ datasource.DataSourceWithConfigure = &datasourceManagedKubernetesEngine{}
+)
+
+// datasourceManagedKubernetesEngine reads managed FKE clusters; all four
+// clients are derived from the provider client in Configure.
+type datasourceManagedKubernetesEngine struct {
+ client *commons.Client
+ mfkeClient *MfkeApiClient
+ subnetClient fptcloud_subnet.SubnetService
+ tenancyClient *fptcloud_dfke.TenancyApiClient
+}
+
+// Configure receives the provider's API client and derives the MFKE, subnet,
+// and tenancy clients from it.
+func (d *datasourceManagedKubernetesEngine) Configure(ctx context.Context, request datasource.ConfigureRequest, response *datasource.ConfigureResponse) {
+ if request.ProviderData == nil {
+ // Provider not configured yet (e.g. during validation); nothing to do.
+ return
+ }
+
+ client, ok := request.ProviderData.(*commons.Client)
+ if !ok {
+ response.Diagnostics.AddError(
+ "Unexpected Resource Configure Type",
+ fmt.Sprintf("Expected *commons.Client, got: %T. Please report this issue to the provider developers.", request.ProviderData),
+ )
+
+ return
+ }
+
+ d.client = client
+ d.mfkeClient = newMfkeApiClient(d.client)
+ d.subnetClient = fptcloud_subnet.NewSubnetService(d.client)
+ d.tenancyClient = fptcloud_dfke.NewTenancyApiClient(d.client)
+}
+
+// Metadata sets the data source type name:
+// "<provider>_managed_kubernetes_engine_v1".
+func (d *datasourceManagedKubernetesEngine) Metadata(ctx context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) {
+ response.TypeName = request.ProviderTypeName + "_managed_kubernetes_engine_v1"
+}
+
+// Schema assembles the data-source schema from the shared top-level and pool
+// field builders, then overrides id as computed and cluster_name/vpc_id as
+// the required lookup keys.
+//
+// NOTE(review): the description says "Manage" although this is a read-only
+// data source — confirm intended wording.
+func (d *datasourceManagedKubernetesEngine) Schema(ctx context.Context, request datasource.SchemaRequest, response *datasource.SchemaResponse) {
+ topLevelAttributes := d.topFields()
+ poolAttributes := d.poolFields()
+
+ topLevelAttributes["id"] = schema.StringAttribute{
+ Computed: true,
+ }
+ topLevelAttributes["cluster_name"] = schema.StringAttribute{
+ Required: true,
+ }
+ topLevelAttributes["vpc_id"] = schema.StringAttribute{
+ Required: true,
+ }
+
+ response.Schema = schema.Schema{
+ Description: "Manage managed FKE clusters.",
+ Attributes: topLevelAttributes,
+ }
+
+ // Worker pools are modeled as a nested list block.
+ response.Schema.Blocks = map[string]schema.Block{
+ "pools": schema.ListNestedBlock{
+ NestedObject: schema.NestedBlockObject{
+ Attributes: poolAttributes,
+ },
+ },
+ }
+}
+
+// Read refreshes the data source state from the managed FKE API, keyed by
+// cluster name.
+func (d *datasourceManagedKubernetesEngine) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) {
+ var state managedKubernetesEngine
+ response.Diagnostics.Append(request.Config.Get(ctx, &state)...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+
+ if _, err := d.internalRead(ctx, state.ClusterName.ValueString(), &state); err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error calling API", err.Error()))
+ return
+ }
+
+ response.Diagnostics.Append(response.State.Set(ctx, &state)...)
+}
+
+// NewDataSourceManagedKubernetesEngine returns a new, unconfigured instance
+// of the managed FKE data source.
+func NewDataSourceManagedKubernetesEngine() datasource.DataSource {
+ return &datasourceManagedKubernetesEngine{}
+}
+
+// internalRead fetches a managed FKE cluster and maps the API response onto
+// the Terraform state. It returns the raw decoded response so callers can
+// inspect fields not captured in state.
+func (d *datasourceManagedKubernetesEngine) internalRead(ctx context.Context, id string, state *managedKubernetesEngine) (*managedKubernetesEngineReadResponse, error) {
+ vpcId := state.VpcId.ValueString()
+ tflog.Info(ctx, "Reading state of cluster ID "+id+", VPC ID "+vpcId)
+
+ // The VPC's platform (lower-cased) selects the API path variant and the
+ // infra-type request header.
+ platform, err := d.tenancyClient.GetVpcPlatform(ctx, vpcId)
+ if err != nil {
+ return nil, err
+ }
+
+ platform = strings.ToLower(platform)
+
+ path := commons.ApiPath.ManagedFKEGet(vpcId, platform, id)
+ a, err := d.mfkeClient.sendGet(path, platform)
+ if err != nil {
+ return nil, err
+ }
+
+ var response managedKubernetesEngineReadResponse
+ err = json.Unmarshal(a, &response)
+ if err != nil {
+ return nil, err
+ }
+
+ // The API signals failures in-band via the Error/Mess fields.
+ if response.Error {
+ return nil, fmt.Errorf("error: %v", response.Mess)
+ }
+
+ data := response.Data
+
+ state.Id = types.StringValue(data.Metadata.Name)
+ //state.ClusterName = types.StringValue(d.getClusterName(data.Metadata.Name))
+ state.VpcId = types.StringValue(vpcId)
+ // keep clusterName
+ //state.NetworkID
+ state.K8SVersion = types.StringValue(data.Spec.Kubernetes.Version)
+
+ // Purpose is the first dash-separated token of the gardener cloud-purpose
+ // seed-selector label.
+ cloudPurpose := strings.Split(data.Spec.SeedSelector.MatchLabels.GardenerCloudPurpose, "-")
+ state.Purpose = types.StringValue(cloudPurpose[0])
+
+ poolNames, err := validatePoolNames(state.Pools)
+ if err != nil {
+ return nil, err
+ }
+
+ // Index workers by name; when no pools were configured, surface every
+ // worker returned by the API.
+ workers := map[string]*managedKubernetesEngineDataWorker{}
+ for _, worker := range data.Spec.Provider.Workers {
+ workers[worker.Name] = worker
+
+ if len(state.Pools) == 0 {
+ poolNames = append(poolNames, worker.Name)
+ }
+ }
+
+ var pool []*managedKubernetesEnginePool
+
+ for _, name := range poolNames {
+ w, ok := workers[name]
+ if !ok {
+ continue
+ }
+
+ // Each pool's flavor ID is stored as a metadata label keyed by pool name.
+ flavorPoolKey := "fptcloud.com/flavor_pool_" + name
+ flavorId, ok := data.Metadata.Labels[flavorPoolKey]
+ if !ok {
+ return nil, errors.New("missing flavor ID on label " + flavorPoolKey)
+ }
+
+ autoRepair := w.AutoRepair()
+
+ networkId, e := getNetworkIdByPlatform(ctx, d.subnetClient, vpcId, platform, w, &data)
+
+ if e != nil {
+ return nil, e
+ }
+
+ item := managedKubernetesEnginePool{
+ WorkerPoolID: types.StringValue(w.Name),
+ StorageProfile: types.StringValue(w.Volume.Type),
+ WorkerType: types.StringValue(flavorId),
+ WorkerDiskSize: types.Int64Value(int64(parseNumber(w.Volume.Size))),
+ // A pool is considered autoscaling when its bounds differ.
+ AutoScale: types.BoolValue(w.Maximum != w.Minimum),
+ ScaleMin: types.Int64Value(int64(w.Minimum)),
+ ScaleMax: types.Int64Value(int64(w.Maximum)),
+ NetworkName: types.StringValue(w.ProviderConfig.NetworkName),
+ NetworkID: types.StringValue(networkId),
+ IsEnableAutoRepair: types.BoolValue(autoRepair),
+ //DriverInstallationType: types.String{},
+ //GpuDriverVersion: types.StringValue(gpuDriverVersion),
+ }
+
+ pool = append(pool, &item)
+ }
+
+ state.Pools = pool
+
+ // NOTE(review): the splits below index [1] and assume the pod/service
+ // network strings always contain "/"; an unexpected format would panic —
+ // confirm the API guarantees CIDR notation here.
+ podNetwork := strings.Split(data.Spec.Networking.Pods, "/")
+ state.PodNetwork = types.StringValue(podNetwork[0])
+ state.PodPrefix = types.StringValue(podNetwork[1])
+
+ serviceNetwork := strings.Split(data.Spec.Networking.Services, "/")
+ state.ServiceNetwork = types.StringValue(serviceNetwork[0])
+ state.ServicePrefix = types.StringValue(serviceNetwork[1])
+
+ state.K8SMaxPod = types.Int64Value(int64(data.Spec.Kubernetes.Kubelet.MaxPods))
+ // state.NetworkNodePrefix
+ state.RangeIPLbStart = types.StringValue(data.Spec.Provider.InfrastructureConfig.Networks.LbIPRangeStart)
+ state.RangeIPLbEnd = types.StringValue(data.Spec.Provider.InfrastructureConfig.Networks.LbIPRangeEnd)
+
+ state.LoadBalancerType = types.StringValue(data.Spec.LoadBalancerType)
+
+ return &response, nil
+}
+
+// topFields returns the top-level schema attributes of the data source; every
+// attribute is computed because the data source only reads existing clusters.
+// (The previous version redundantly re-assigned "k8s_version" and
+// "network_node_prefix" with values identical to those set in the loops.)
+func (d *datasourceManagedKubernetesEngine) topFields() map[string]schema.Attribute {
+ topLevelAttributes := map[string]schema.Attribute{}
+ requiredStrings := []string{
+ "vpc_id", "cluster_name", "k8s_version", "purpose",
+ "pod_network", "pod_prefix", "service_network", "service_prefix",
+ "range_ip_lb_start", "range_ip_lb_end", "load_balancer_type",
+ }
+
+ requiredInts := []string{"k8s_max_pod", "network_node_prefix"}
+
+ for _, attribute := range requiredStrings {
+ topLevelAttributes[attribute] = schema.StringAttribute{
+ Computed: true,
+ }
+ }
+
+ for _, attribute := range requiredInts {
+ topLevelAttributes[attribute] = schema.Int64Attribute{
+ Computed: true,
+ }
+ }
+
+ return topLevelAttributes
+}
+// poolFields returns the per-pool schema attributes; every attribute is
+// computed because the data source only reads existing clusters. (The
+// previous version redundantly re-assigned "scale_min" and "scale_max" with
+// values identical to those set in the loop.)
+func (d *datasourceManagedKubernetesEngine) poolFields() map[string]schema.Attribute {
+ poolLevelAttributes := map[string]schema.Attribute{}
+ requiredStrings := []string{
+ "name",
+ "storage_profile", "worker_type",
+ "network_name", "network_id",
+ //"driver_installation_type", "gpu_driver_version",
+ }
+ requiredInts := []string{
+ "worker_disk_size", "scale_min", "scale_max",
+ }
+
+ requiredBool := []string{
+ "auto_scale", "is_enable_auto_repair",
+ }
+
+ for _, attribute := range requiredStrings {
+ poolLevelAttributes[attribute] = schema.StringAttribute{
+ Computed: true,
+ }
+ }
+
+ for _, attribute := range requiredInts {
+ poolLevelAttributes[attribute] = schema.Int64Attribute{
+ Computed: true,
+ }
+ }
+
+ for _, attribute := range requiredBool {
+ poolLevelAttributes[attribute] = schema.BoolAttribute{
+ Computed: true,
+ }
+ }
+
+ return poolLevelAttributes
+}
diff --git a/fptcloud/mfke/mfke_descriptions.go b/fptcloud/mfke/mfke_descriptions.go
new file mode 100644
index 0000000..3eb4dfc
--- /dev/null
+++ b/fptcloud/mfke/mfke_descriptions.go
@@ -0,0 +1,29 @@
+package fptcloud_mfke
+
+// descriptions maps schema attribute names to their human-readable
+// description text for the managed FKE schemas.
+var descriptions = map[string]string{
+ "vpc_id": "VPC ID",
+ "cluster_name": "Cluster name",
+ "k8s_version": "Kubernetes version",
+ "purpose": "Cluster purpose",
+ "pod_network": "Pod network (subnet ID)",
+ "pod_prefix": "Pod network (prefix)",
+ "service_network": "Service network (subnet ID)",
+ "service_prefix": "Service prefix (prefix)",
+ "range_ip_lb_start": "IP start for range of LB",
+ "range_ip_lb_end": "IP stop for range of LB",
+ "load_balancer_type": "Load balancer type",
+
+ "k8s_max_pod": "Max pods per node",
+ "network_node_prefix": "Network node prefix",
+
+ "name": "Pool name",
+ "storage_profile": "Pool storage profile",
+ "worker_type": "Worker flavor ID",
+ "network_name": "Subnet name",
+ "network_id": "Subnet ID",
+ "worker_disk_size": "Worker disk size",
+ "scale_min": "Minimum number of nodes for autoscaling",
+ "scale_max": "Maximum number of nodes for autoscaling",
+ "auto_scale": "Whether to enable autoscaling",
+ "is_enable_auto_repair": "Whether to enable auto-repair",
+}
diff --git a/fptcloud/mfke/mfke_service.go b/fptcloud/mfke/mfke_service.go
new file mode 100644
index 0000000..d7a9b49
--- /dev/null
+++ b/fptcloud/mfke/mfke_service.go
@@ -0,0 +1,59 @@
+package fptcloud_mfke
+
+import (
+ "bytes"
+ "encoding/json"
+ "net/http"
+ "strings"
+ "terraform-provider-fptcloud/commons"
+)
+
+// MfkeApiClient wraps commons.Client, adding the region and infra-type
+// headers that the managed FKE API requires on every request.
+type MfkeApiClient struct {
+ *commons.Client
+}
+
+// newMfkeApiClient wraps an existing API client.
+func newMfkeApiClient(c *commons.Client) *MfkeApiClient {
+ return &MfkeApiClient{c}
+}
+
+// sendGet issues a GET carrying the MFKE-specific headers (region and
+// infra type).
+func (m *MfkeApiClient) sendGet(requestURL string, infraType string) ([]byte, error) {
+ endpoint := m.Client.PrepareClientURL(requestURL)
+
+ req, err := http.NewRequest("GET", endpoint.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return m.sendRequestWithHeader(req, infraType)
+}
+
+// sendPost issues a POST with a JSON-encoded body and the MFKE-specific
+// headers (region and infra type).
+func (m *MfkeApiClient) sendPost(requestURL string, infraType string, params interface{}) ([]byte, error) {
+ u := m.Client.PrepareClientURL(requestURL)
+
+ // Encode the payload up front; a marshal failure must not be silently
+ // turned into an empty/partial request body (was previously discarded).
+ jsonValue, err := json.Marshal(params)
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest("POST", u.String(), bytes.NewBuffer(jsonValue))
+ if err != nil {
+ return nil, err
+ }
+ return m.sendRequestWithHeader(req, infraType)
+}
+
+// sendPatch issues a PATCH with a JSON-encoded body and the MFKE-specific
+// headers (region and infra type).
+func (m *MfkeApiClient) sendPatch(requestURL string, infraType string, params interface{}) ([]byte, error) {
+ u := m.Client.PrepareClientURL(requestURL)
+
+ // Encode the payload up front; a marshal failure must not be silently
+ // turned into an empty/partial request body (was previously discarded).
+ jsonValue, err := json.Marshal(params)
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest("PATCH", u.String(), bytes.NewBuffer(jsonValue))
+ if err != nil {
+ return nil, err
+ }
+ return m.sendRequestWithHeader(req, infraType)
+}
+
+// sendRequestWithHeader attaches the region and (upper-cased) infra-type
+// headers required by the MFKE API, then delegates to the shared client.
+func (m *MfkeApiClient) sendRequestWithHeader(request *http.Request, infraType string) ([]byte, error) {
+ request.Header.Set("fpt-region", m.Client.Region)
+ request.Header.Set("infra-type", strings.ToUpper(infraType))
+ return m.Client.SendRequest(request)
+}
diff --git a/fptcloud/mfke/resource_mfke.go b/fptcloud/mfke/resource_mfke.go
new file mode 100644
index 0000000..c3ee506
--- /dev/null
+++ b/fptcloud/mfke/resource_mfke.go
@@ -0,0 +1,1033 @@
+package fptcloud_mfke
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ diag2 "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "strconv"
+ "strings"
+ "terraform-provider-fptcloud/commons"
+ fptcloud_dfke "terraform-provider-fptcloud/fptcloud/dfke"
+ fptcloud_subnet "terraform-provider-fptcloud/fptcloud/subnet"
+ "unicode"
+)
+
var (
	// Compile-time checks: the resource must also support Configure and
	// ImportState in addition to basic CRUD.
	_ resource.Resource = &resourceManagedKubernetesEngine{}
	_ resource.ResourceWithConfigure = &resourceManagedKubernetesEngine{}
	_ resource.ResourceWithImportState = &resourceManagedKubernetesEngine{}

	// Shared plan modifiers for attributes that cannot change in place and
	// therefore force resource replacement.
	forceNewPlanModifiersString = []planmodifier.String{
		stringplanmodifier.RequiresReplace(),
	}

	forceNewPlanModifiersInt = []planmodifier.Int64{
		int64planmodifier.RequiresReplace(),
	}

	forceNewPlanModifiersBool = []planmodifier.Bool{
		boolplanmodifier.RequiresReplace(),
	}
)
+
const (
	// errorCallingApi is the generic diagnostic summary for failed API calls.
	errorCallingApi = "Error calling API"
	// platformVpcErrorPrefix prefixes diagnostics raised while resolving the
	// platform (e.g. VMW/OSP) backing a VPC.
	platformVpcErrorPrefix = "Error getting platform for VPC "
)
+
// resourceManagedKubernetesEngine implements the managed FKE cluster
// resource. It holds the shared provider client plus derived clients for
// MFKE API calls, subnet lookups, and tenancy/platform resolution.
type resourceManagedKubernetesEngine struct {
	client *commons.Client
	mfkeClient *MfkeApiClient
	subnetClient fptcloud_subnet.SubnetService
	tenancyClient *fptcloud_dfke.TenancyApiClient
}
+
// NewResourceManagedKubernetesEngine returns an unconfigured resource;
// clients are populated later in Configure.
func NewResourceManagedKubernetesEngine() resource.Resource {
	return &resourceManagedKubernetesEngine{}
}

// Metadata sets the Terraform type name for this resource.
func (r *resourceManagedKubernetesEngine) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) {
	response.TypeName = request.ProviderTypeName + "_managed_kubernetes_engine_v1"
}
+func (r *resourceManagedKubernetesEngine) Schema(_ context.Context, _ resource.SchemaRequest, response *resource.SchemaResponse) {
+ topLevelAttributes := r.topFields()
+ poolAttributes := r.poolFields()
+
+ topLevelAttributes["id"] = schema.StringAttribute{
+ Computed: true,
+ }
+
+ response.Schema = schema.Schema{
+ Description: "Manage managed FKE clusters.",
+ Attributes: topLevelAttributes,
+ }
+
+ response.Schema.Blocks = map[string]schema.Block{
+ "pools": schema.ListNestedBlock{
+ NestedObject: schema.NestedBlockObject{
+ Attributes: poolAttributes,
+ },
+ },
+ }
+}
// Create provisions a new managed FKE cluster from the plan, then refreshes
// the Terraform state from the live cluster before saving it.
func (r *resourceManagedKubernetesEngine) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) {
	var state managedKubernetesEngine
	diags := request.Plan.Get(ctx, &state)

	response.Diagnostics.Append(diags...)
	if response.Diagnostics.HasError() {
		return
	}

	// At least one worker pool with a valid, unique name is required.
	if err := validatePool(state.Pools); err != nil {
		response.Diagnostics.Append(err)
		return
	}

	// Build the create payload: copy plan values, then stamp server-side
	// defaults and resolve the OS version for the requested k8s version.
	var f managedKubernetesEngineJson
	r.remap(&state, &f)
	errDiag := r.fillJson(ctx, &f, state.VpcId.ValueString())

	if errDiag != nil {
		response.Diagnostics.Append(errDiag)
		return
	}

	// The request path and headers depend on the platform backing the VPC.
	platform, err := r.tenancyClient.GetVpcPlatform(ctx, state.VpcId.ValueString())
	if err != nil {
		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error getting VPC platform", err.Error()))
		return
	}

	if err := validateNetwork(&state, platform); err != nil {
		response.Diagnostics.Append(err)
		return
	}

	path := commons.ApiPath.ManagedFKECreate(state.VpcId.ValueString(), strings.ToLower(platform))
	tflog.Info(ctx, "Calling path "+path)
	a, err := r.mfkeClient.sendPost(path, platform, f)

	if err != nil {
		response.Diagnostics.Append(diag2.NewErrorDiagnostic(errorCallingApi, err.Error()))
		return
	}

	// The API reports failures in-band via a boolean "error" field.
	errorResponse := r.checkForError(a)
	if errorResponse != nil {
		response.Diagnostics.Append(errorResponse)
		return
	}

	var createResponse managedKubernetesEngineCreateResponse
	if err = json.Unmarshal(a, &createResponse); err != nil {
		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error unmarshalling response", err.Error()))
		return
	}

	// Clusters are subsequently addressed by the "<name>-<id>" slug.
	slug := fmt.Sprintf("%s-%s", createResponse.Kpi.ClusterName, createResponse.Kpi.ClusterId)

	tflog.Info(ctx, "Created cluster with id "+slug)

	if _, err = r.internalRead(ctx, slug, &state); err != nil {
		response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error reading cluster state", err.Error()))
		return
	}

	diags = response.State.Set(ctx, &state)
	response.Diagnostics.Append(diags...)
	if response.Diagnostics.HasError() {
		return
	}
}
+
+func (r *resourceManagedKubernetesEngine) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) {
+ var state managedKubernetesEngine
+ diags := request.State.Get(ctx, &state)
+
+ response.Diagnostics.Append(diags...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+
+ _, err := r.internalRead(ctx, state.Id.ValueString(), &state)
+ if err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic(errorCallingApi, err.Error()))
+ return
+ }
+
+ diags = response.State.Set(ctx, &state)
+ response.Diagnostics.Append(diags...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+}
+
+func (r *resourceManagedKubernetesEngine) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) {
+ var state managedKubernetesEngine
+ diags := request.State.Get(ctx, &state)
+
+ response.Diagnostics.Append(diags...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+
+ var plan managedKubernetesEngine
+ request.Plan.Get(ctx, &plan)
+
+ response.Diagnostics.Append(diags...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+
+ errDiag := r.diff(ctx, &state, &plan)
+ if errDiag != nil {
+ response.Diagnostics.Append(errDiag)
+ return
+ }
+
+ _, err := r.internalRead(ctx, state.Id.ValueString(), &state)
+ if err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic("Error refreshing state", err.Error()))
+ return
+ }
+
+ diags = response.State.Set(ctx, &state)
+ response.Diagnostics.Append(diags...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+}
+
+func (r *resourceManagedKubernetesEngine) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) {
+ var state managedKubernetesEngine
+ diags := request.State.Get(ctx, &state)
+
+ response.Diagnostics.Append(diags...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+
+ _, err := r.client.SendDeleteRequest(
+ commons.ApiPath.ManagedFKEDelete(state.VpcId.ValueString(), "vmw", state.ClusterName.ValueString()),
+ )
+ if err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic(errorCallingApi, err.Error()))
+ return
+ }
+}
+func (r *resourceManagedKubernetesEngine) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) {
+ tflog.Info(ctx, "Importing MFKE cluster ID "+request.ID)
+
+ var state managedKubernetesEngine
+
+ id := request.ID
+ pieces := strings.Split(id, "/")
+ if len(pieces) != 2 {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic("Invalid format", "must be in format vpcId/clusterId"))
+ return
+ }
+
+ vpcId := pieces[0]
+ clusterId := pieces[1]
+
+ state.VpcId = types.StringValue(vpcId)
+
+ state.Id = types.StringValue(clusterId)
+
+ _, err := r.internalRead(ctx, clusterId, &state)
+ if err != nil {
+ response.Diagnostics.Append(diag2.NewErrorDiagnostic(errorCallingApi, err.Error()))
+ return
+ }
+
+ diags := response.State.Set(ctx, &state)
+ response.Diagnostics.Append(diags...)
+ if response.Diagnostics.HasError() {
+ return
+ }
+}
+func (r *resourceManagedKubernetesEngine) Configure(_ context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) {
+ if request.ProviderData == nil {
+ return
+ }
+
+ client, ok := request.ProviderData.(*commons.Client)
+ if !ok {
+ response.Diagnostics.AddError(
+ "Unexpected Resource Configure Type",
+ fmt.Sprintf("Expected *commons.Client, got: %T. Please report this issue to the provider developers.", request.ProviderData),
+ )
+
+ return
+ }
+
+ r.client = client
+ r.mfkeClient = newMfkeApiClient(r.client)
+ r.subnetClient = fptcloud_subnet.NewSubnetService(r.client)
+ r.tenancyClient = fptcloud_dfke.NewTenancyApiClient(r.client)
+}
// topFields assembles the cluster-level schema attributes. Most attributes
// force replacement when changed; k8s_version and network_node_prefix are
// deliberately re-declared afterwards WITHOUT RequiresReplace because they
// can be changed in place through Update.
func (r *resourceManagedKubernetesEngine) topFields() map[string]schema.Attribute {
	topLevelAttributes := map[string]schema.Attribute{}
	requiredStrings := []string{
		"vpc_id", "cluster_name", "k8s_version", "purpose",
		"pod_network", "pod_prefix", "service_network", "service_prefix",
		"range_ip_lb_start", "range_ip_lb_end", "load_balancer_type", "network_id",
	}

	requiredInts := []string{"k8s_max_pod", "network_node_prefix"}

	for _, attribute := range requiredStrings {
		topLevelAttributes[attribute] = schema.StringAttribute{
			Required: true,
			PlanModifiers: forceNewPlanModifiersString,
			Description: descriptions[attribute],
		}
	}

	for _, attribute := range requiredInts {
		topLevelAttributes[attribute] = schema.Int64Attribute{
			Required: true,
			PlanModifiers: forceNewPlanModifiersInt,
			Description: descriptions[attribute],
		}
	}

	// Override: upgradable in place (version upgrade API), so no RequiresReplace.
	topLevelAttributes["k8s_version"] = schema.StringAttribute{
		Required: true,
		Description: descriptions["k8s_version"],
	}
	// Override: node network prefix can be changed without replacement.
	topLevelAttributes["network_node_prefix"] = schema.Int64Attribute{
		Required: true,
		Description: descriptions["network_node_prefix"],
	}

	return topLevelAttributes
}
// poolFields assembles the per-pool schema attributes. Most attributes force
// replacement; scale_min and scale_max are re-declared afterwards WITHOUT
// RequiresReplace because pools can be rescaled in place via the
// configure-worker API.
func (r *resourceManagedKubernetesEngine) poolFields() map[string]schema.Attribute {
	poolLevelAttributes := map[string]schema.Attribute{}
	requiredStrings := []string{
		"name",
		"storage_profile", "worker_type",
		"network_name", "network_id",
		//"driver_installation_type", "gpu_driver_version",
	}
	requiredInts := []string{
		"worker_disk_size", "scale_min", "scale_max",
	}

	requiredBool := []string{
		"auto_scale", "is_enable_auto_repair",
	}

	for _, attribute := range requiredStrings {
		poolLevelAttributes[attribute] = schema.StringAttribute{
			Required: true,
			PlanModifiers: forceNewPlanModifiersString,
			Description: descriptions[attribute],
		}
	}

	for _, attribute := range requiredInts {
		poolLevelAttributes[attribute] = schema.Int64Attribute{
			Required: true,
			PlanModifiers: forceNewPlanModifiersInt,
			Description: descriptions[attribute],
		}
	}

	for _, attribute := range requiredBool {
		poolLevelAttributes[attribute] = schema.BoolAttribute{
			Required: true,
			PlanModifiers: forceNewPlanModifiersBool,
			Description: descriptions[attribute],
		}
	}

	// Overrides: scaling bounds are adjustable in place, so no RequiresReplace.
	poolLevelAttributes["scale_min"] = schema.Int64Attribute{
		Required: true,
		Description: descriptions["scale_min"],
	}

	poolLevelAttributes["scale_max"] = schema.Int64Attribute{
		Required: true,
		Description: descriptions["scale_max"],
	}

	return poolLevelAttributes
}
+
+func (r *resourceManagedKubernetesEngine) fillJson(ctx context.Context, to *managedKubernetesEngineJson, vpcId string) *diag2.ErrorDiagnostic {
+ to.SSHKey = nil
+ to.TypeCreate = "create"
+ to.NetworkType = "calico"
+ for _, pool := range to.Pools {
+ pool.ContainerRuntime = "containerd"
+ pool.DriverInstallationType = "pre-install"
+ pool.GpuDriverVersion = "default"
+ pool.Kv = []struct {
+ Name string `json:"name"`
+ }([]struct{ Name string }{})
+ pool.VGpuID = nil
+ pool.IsDisplayGPU = false
+ pool.IsCreate = true
+ pool.IsScale = false
+ pool.IsOthers = false
+ }
+
+ // get k8s versions
+ version := to.K8SVersion
+ if strings.HasPrefix(version, "v") {
+ version = string([]rune(version)[1:])
+ }
+
+ osVersion, err := r.getOsVersion(ctx, version, vpcId)
+ if err != nil {
+ return err
+ }
+
+ to.OsVersion = osVersion
+
+ return nil
+}
+func (r *resourceManagedKubernetesEngine) remap(from *managedKubernetesEngine, to *managedKubernetesEngineJson) {
+ to.ClusterName = from.ClusterName.ValueString()
+ to.K8SVersion = from.K8SVersion.ValueString()
+ to.Purpose = from.Purpose.ValueString()
+
+ pools := make([]*managedKubernetesEnginePoolJson, 0)
+ for _, item := range from.Pools {
+ name := item.WorkerPoolID.ValueString()
+ newItem := managedKubernetesEnginePoolJson{
+ StorageProfile: item.StorageProfile.ValueString(),
+ WorkerType: item.WorkerType.ValueString(),
+ WorkerDiskSize: item.WorkerDiskSize.ValueInt64(),
+ AutoScale: item.AutoScale.ValueBool(),
+ ScaleMin: item.ScaleMin.ValueInt64(),
+ ScaleMax: item.ScaleMax.ValueInt64(),
+ NetworkName: item.NetworkName.ValueString(),
+ NetworkID: item.NetworkID.ValueString(),
+ IsEnableAutoRepair: item.IsEnableAutoRepair.ValueBool(),
+ WorkerPoolID: &name,
+ }
+
+ pools = append(pools, &newItem)
+ }
+ to.Pools = pools
+
+ to.NetworkID = to.Pools[0].NetworkID
+
+ to.PodNetwork = from.PodNetwork.ValueString()
+ to.PodPrefix = from.PodPrefix.ValueString()
+ to.ServiceNetwork = from.ServiceNetwork.ValueString()
+ to.ServicePrefix = from.ServicePrefix.ValueString()
+ to.K8SMaxPod = from.K8SMaxPod.ValueInt64()
+ to.NetworkNodePrefix = from.NetworkNodePrefix.ValueInt64()
+ to.RangeIPLbStart = from.RangeIPLbStart.ValueString()
+ to.RangeIPLbEnd = from.RangeIPLbEnd.ValueString()
+ to.LoadBalancerType = from.LoadBalancerType.ValueString()
+}
+
// remapPools converts a single Terraform pool entry into its JSON payload
// form, using the provided name as the worker pool ID.
func (r *resourceManagedKubernetesEngine) remapPools(item *managedKubernetesEnginePool, name string) *managedKubernetesEnginePoolJson {
	newItem := managedKubernetesEnginePoolJson{
		StorageProfile: item.StorageProfile.ValueString(),
		WorkerType: item.WorkerType.ValueString(),
		WorkerDiskSize: item.WorkerDiskSize.ValueInt64(),
		AutoScale: item.AutoScale.ValueBool(),
		ScaleMin: item.ScaleMin.ValueInt64(),
		ScaleMax: item.ScaleMax.ValueInt64(),
		NetworkName: item.NetworkName.ValueString(),
		NetworkID: item.NetworkID.ValueString(),
		IsEnableAutoRepair: item.IsEnableAutoRepair.ValueBool(),
		WorkerPoolID: &name,
	}

	return &newItem
}
+
+func (r *resourceManagedKubernetesEngine) checkForError(a []byte) *diag2.ErrorDiagnostic {
+ var re map[string]interface{}
+ err := json.Unmarshal(a, &re)
+ if err != nil {
+ res := diag2.NewErrorDiagnostic("Error unmarshalling response", err.Error())
+ return &res
+ }
+
+ if e, ok := re["error"]; ok {
+ if e == true {
+ res := diag2.NewErrorDiagnostic("Response contained an error field", "Response body was "+string(a))
+ return &res
+ }
+ }
+
+ return nil
+}
+
// diff applies in-place changes between state (from) and plan (to): a k8s
// version change triggers the upgrade API, and worker pool changes trigger
// the configure-worker API. Non-replaceable scalar fields are copied onto
// `from` so the saved state matches the plan.
func (r *resourceManagedKubernetesEngine) diff(ctx context.Context, from *managedKubernetesEngine, to *managedKubernetesEngine) *diag2.ErrorDiagnostic {
	if from.K8SVersion != to.K8SVersion {
		if err := r.upgradeVersion(ctx, from, to); err != nil {
			return err
		}
	}
	if from.NetworkNodePrefix != to.NetworkNodePrefix {
		from.NetworkNodePrefix = to.NetworkNodePrefix
	}

	editGroup := r.diffPool(ctx, from, to)

	if editGroup {
		// Refresh the remote view first: the configure call must echo the
		// cluster's current node networking CIDR.
		d, err := r.internalRead(ctx, from.Id.ValueString(), from)
		if err != nil {
			di := diag2.NewErrorDiagnostic("Error reading cluster state", err.Error())
			return &di
		}

		pools := []*managedKubernetesEnginePoolJson{}

		for _, pool := range to.Pools {
			item := r.remapPools(pool, pool.WorkerPoolID.ValueString())
			pools = append(pools, item)
		}

		body := managedKubernetesEngineEditWorker{
			K8sVersion: to.K8SVersion.ValueString(),
			CurrentNetworking: d.Data.Spec.Networking.Nodes,
			Pools: pools,
			TypeConfigure: "configure",
		}

		vpcId := from.VpcId.ValueString()
		platform, err := r.tenancyClient.GetVpcPlatform(ctx, vpcId)
		if err != nil {
			d := diag2.NewErrorDiagnostic(platformVpcErrorPrefix+vpcId, err.Error())
			return &d
		}

		platform = strings.ToLower(platform)

		// NOTE(review): endpoint built inline rather than via commons.ApiPath;
		// the trailing "/0" segment's meaning is not visible here — confirm
		// against the API documentation.
		path := fmt.Sprintf(
			"/v1/xplat/fke/vpc/%s/m-fke/%s/configure-worker-cluster/shoots/%s/0",
			from.VpcId.ValueString(),
			platform,
			from.Id.ValueString(),
		)

		res, err := r.mfkeClient.sendPatch(path, platform, body)
		if err != nil {
			d := diag2.NewErrorDiagnostic("Error configuring worker", err.Error())
			return &d
		}

		if e2 := r.checkForError(res); e2 != nil {
			return e2
		}
	}

	return nil
}
+
// upgradeVersion calls the in-place k8s version upgrade endpoint for the
// cluster identified by `from`, targeting the version in `to`.
func (r *resourceManagedKubernetesEngine) upgradeVersion(ctx context.Context, from, to *managedKubernetesEngine) *diag2.ErrorDiagnostic {
	// upgrade version
	vpcId := from.VpcId.ValueString()
	cluster := from.Id.ValueString()
	targetVersion := to.K8SVersion.ValueString()

	// The path segment and request headers depend on the VPC's platform.
	platform, err := r.tenancyClient.GetVpcPlatform(ctx, vpcId)
	if err != nil {
		d := diag2.NewErrorDiagnostic(platformVpcErrorPrefix+vpcId, err.Error())
		return &d
	}

	platform = strings.ToLower(platform)

	path := fmt.Sprintf(
		"/v1/xplat/fke/vpc/%s/m-fke/%s/upgrade_version_cluster/shoots/%s/k8s-version/%s",
		vpcId,
		platform,
		cluster,
		targetVersion,
	)

	// The upgrade endpoint takes no body; an empty struct marshals to "{}".
	body, err := r.mfkeClient.sendPatch(path, platform, struct{}{})
	if err != nil {
		d := diag2.NewErrorDiagnostic(
			fmt.Sprintf("Error upgrading version to %s", to.K8SVersion.ValueString()),
			err.Error(),
		)

		return &d
	}

	if diagErr2 := r.checkForError(body); diagErr2 != nil {
		return diagErr2
	}

	return nil
}
+
+func (r *resourceManagedKubernetesEngine) diffPool(_ context.Context, from *managedKubernetesEngine, to *managedKubernetesEngine) bool {
+ fromPool := map[string]*managedKubernetesEnginePool{}
+ toPool := map[string]*managedKubernetesEnginePool{}
+
+ for _, pool := range from.Pools {
+ fromPool[pool.WorkerPoolID.ValueString()] = pool
+ }
+
+ for _, pool := range to.Pools {
+ toPool[pool.WorkerPoolID.ValueString()] = pool
+ }
+
+ if len(fromPool) != len(toPool) {
+ return true
+ }
+
+ for _, pool := range from.Pools {
+ f := fromPool[pool.WorkerPoolID.ValueString()]
+ t := toPool[pool.WorkerPoolID.ValueString()]
+ if f.ScaleMin != t.ScaleMin || f.ScaleMax != t.ScaleMax {
+ return true
+ }
+ }
+
+ return false
+}
+
// internalRead fetches the cluster identified by id from the API and maps
// the response onto the Terraform state in place. It returns the raw decoded
// response so callers (e.g. diff) can read fields not stored in state.
// NOTE: NetworkNodePrefix and the cluster-level NetworkID are intentionally
// not overwritten here (see the inline comments below).
func (r *resourceManagedKubernetesEngine) internalRead(ctx context.Context, id string, state *managedKubernetesEngine) (*managedKubernetesEngineReadResponse, error) {
	vpcId := state.VpcId.ValueString()
	tflog.Info(ctx, "Reading state of cluster ID "+id+", VPC ID "+vpcId)

	platform, err := r.tenancyClient.GetVpcPlatform(ctx, vpcId)
	if err != nil {
		return nil, err
	}

	platform = strings.ToLower(platform)

	path := commons.ApiPath.ManagedFKEGet(vpcId, platform, id)
	a, err := r.mfkeClient.sendGet(path, platform)
	if err != nil {
		return nil, err
	}

	var d managedKubernetesEngineReadResponse
	err = json.Unmarshal(a, &d)
	if err != nil {
		return nil, err
	}

	if d.Error {
		return nil, fmt.Errorf("error: %v", d.Mess)
	}

	data := d.Data

	state.Id = types.StringValue(data.Metadata.Name)
	// Strip the trailing "-<clusterId>" suffix the API appends to the name.
	state.ClusterName = types.StringValue(getClusterName(data.Metadata.Name))
	state.VpcId = types.StringValue(vpcId)
	// keep clusterName
	//state.NetworkID
	state.K8SVersion = types.StringValue(data.Spec.Kubernetes.Version)

	// The purpose is the first "-"-separated token of the seed selector label.
	cloudPurpose := strings.Split(data.Spec.SeedSelector.MatchLabels.GardenerCloudPurpose, "-")
	state.Purpose = types.StringValue(cloudPurpose[0])

	poolNames, err := validatePoolNames(state.Pools)
	if err != nil {
		return nil, err
	}

	// Index workers by name; on import (no pools in state yet) every remote
	// worker becomes a pool.
	workers := map[string]*managedKubernetesEngineDataWorker{}
	for _, worker := range data.Spec.Provider.Workers {
		workers[worker.Name] = worker

		if len(state.Pools) == 0 {
			poolNames = append(poolNames, worker.Name)
		}
	}

	var pool []*managedKubernetesEnginePool

	for _, name := range poolNames {
		w, ok := workers[name]
		if !ok {
			// Pool present in state but gone remotely; drop it from state.
			continue
		}

		// The flavor (worker type) is stored as a cluster metadata label.
		flavorPoolKey := "fptcloud.com/flavor_pool_" + name
		flavorId, ok := data.Metadata.Labels[flavorPoolKey]
		if !ok {
			return nil, errors.New("missing flavor ID on label " + flavorPoolKey)
		}

		autoRepair := w.AutoRepair()

		networkId, e := getNetworkIdByPlatform(ctx, r.subnetClient, vpcId, platform, w, &data)
		if e != nil {
			return nil, e
		}

		item := managedKubernetesEnginePool{
			WorkerPoolID: types.StringValue(w.Name),
			StorageProfile: types.StringValue(w.Volume.Type),
			WorkerType: types.StringValue(flavorId),
			// Volume size arrives as a string like "50Gi"; keep digits only.
			WorkerDiskSize: types.Int64Value(int64(parseNumber(w.Volume.Size))),
			// Auto-scaling is inferred from differing min/max bounds.
			AutoScale: types.BoolValue(w.Maximum != w.Minimum),
			ScaleMin: types.Int64Value(int64(w.Minimum)),
			ScaleMax: types.Int64Value(int64(w.Maximum)),
			NetworkName: types.StringValue(w.ProviderConfig.NetworkName),
			NetworkID: types.StringValue(networkId),
			IsEnableAutoRepair: types.BoolValue(autoRepair),
			//DriverInstallationType: types.String{},
			//GpuDriverVersion: types.StringValue(gpuDriverVersion),
		}

		pool = append(pool, &item)
	}

	state.Pools = pool

	// CIDRs come back as "addr/prefix"; split into the schema's two fields.
	podNetwork := strings.Split(data.Spec.Networking.Pods, "/")
	state.PodNetwork = types.StringValue(podNetwork[0])
	state.PodPrefix = types.StringValue(podNetwork[1])

	serviceNetwork := strings.Split(data.Spec.Networking.Services, "/")
	state.ServiceNetwork = types.StringValue(serviceNetwork[0])
	state.ServicePrefix = types.StringValue(serviceNetwork[1])

	state.K8SMaxPod = types.Int64Value(int64(data.Spec.Kubernetes.Kubelet.MaxPods))
	// state.NetworkNodePrefix
	state.RangeIPLbStart = types.StringValue(data.Spec.Provider.InfrastructureConfig.Networks.LbIPRangeStart)
	state.RangeIPLbEnd = types.StringValue(data.Spec.Provider.InfrastructureConfig.Networks.LbIPRangeEnd)

	state.LoadBalancerType = types.StringValue(data.Spec.LoadBalancerType)

	return &d, nil
}
// getOsVersion looks up the OS-version descriptor matching the given k8s
// version (without its "v" prefix) for the platform backing the VPC. The
// returned value is opaque (interface{}) and is forwarded verbatim in the
// create payload.
func (r *resourceManagedKubernetesEngine) getOsVersion(ctx context.Context, version string, vpcId string) (interface{}, *diag2.ErrorDiagnostic) {
	platform, err := r.tenancyClient.GetVpcPlatform(ctx, vpcId)
	if err != nil {
		d := diag2.NewErrorDiagnostic(platformVpcErrorPrefix+vpcId, err.Error())
		return nil, &d
	}

	platform = strings.ToLower(platform)

	var path = commons.ApiPath.GetFKEOSVersion(vpcId, platform)
	tflog.Info(ctx, "Getting OS version for version "+version+", VPC ID "+vpcId)
	tflog.Info(ctx, "Calling "+path)

	res, err := r.mfkeClient.sendGet(path, platform)
	if err != nil {
		diag := diag2.NewErrorDiagnostic(errorCallingApi, err.Error())
		return nil, &diag
	}

	errorResponse := r.checkForError(res)
	if errorResponse != nil {
		return nil, errorResponse
	}

	var list managedKubernetesEngineOsVersionResponse
	if err = json.Unmarshal(res, &list); err != nil {
		diag := diag2.NewErrorDiagnostic(errorCallingApi, err.Error())
		return nil, &diag
	}

	// Match on the "value" field; the paired os_version blob is returned as-is.
	for _, item := range list.Data {
		if item.Value == version {
			return item.OsVersion, nil
		}
	}

	diag := diag2.NewErrorDiagnostic("Error finding OS version", "K8s version "+version+" not found")
	return nil, &diag
}
+
// getClusterName strips the trailing "-<suffix>" segment that the API
// appends to cluster names; names without a dash are returned unchanged.
func getClusterName(name string) string {
	// Use the byte index from LastIndex directly for slicing. The previous
	// implementation collected byte indices via range but then sliced
	// []rune(name) with them, which corrupts names containing multi-byte
	// characters.
	if i := strings.LastIndex(name, "-"); i >= 0 {
		return name[:i]
	}
	return name
}
+
// parseNumber extracts every decimal digit from s (e.g. "50Gi" -> 50) and
// parses their concatenation as an int; a string without digits yields 0.
func parseNumber(s string) int {
	var digits strings.Builder
	for _, r := range s {
		if unicode.IsDigit(r) {
			digits.WriteRune(r)
		}
	}

	text := digits.String()
	if text == "" {
		text = "0"
	}

	n, _ := strconv.Atoi(text)
	return n
}
+
// managedKubernetesEngine mirrors the Terraform schema of the
// _managed_kubernetes_engine_v1 resource (see topFields for which
// attributes force replacement).
type managedKubernetesEngine struct {
	Id types.String `tfsdk:"id"`
	VpcId types.String `tfsdk:"vpc_id"`
	ClusterName types.String `tfsdk:"cluster_name"`
	NetworkID types.String `tfsdk:"network_id"`
	K8SVersion types.String `tfsdk:"k8s_version"`
	//OsVersion struct{} `tfsdk:"os_version"`
	Purpose types.String `tfsdk:"purpose"`
	Pools []*managedKubernetesEnginePool `tfsdk:"pools"`
	PodNetwork types.String `tfsdk:"pod_network"`
	PodPrefix types.String `tfsdk:"pod_prefix"`
	ServiceNetwork types.String `tfsdk:"service_network"`
	ServicePrefix types.String `tfsdk:"service_prefix"`
	K8SMaxPod types.Int64 `tfsdk:"k8s_max_pod"`
	NetworkNodePrefix types.Int64 `tfsdk:"network_node_prefix"`
	RangeIPLbStart types.String `tfsdk:"range_ip_lb_start"`
	RangeIPLbEnd types.String `tfsdk:"range_ip_lb_end"`
	LoadBalancerType types.String `tfsdk:"load_balancer_type"`
	//SSHKey interface{} `tfsdk:"sshKey"` // just set it nil
	//TypeCreate types.String `tfsdk:"type_create"`
	//RegionId types.String `tfsdk:"region_id"`
}

// managedKubernetesEnginePool mirrors one entry of the nested "pools" block
// (see poolFields for the attribute definitions).
type managedKubernetesEnginePool struct {
	WorkerPoolID types.String `tfsdk:"name"`
	StorageProfile types.String `tfsdk:"storage_profile"`
	WorkerType types.String `tfsdk:"worker_type"`
	WorkerDiskSize types.Int64 `tfsdk:"worker_disk_size"`
	//ContainerRuntime types.String `tfsdk:"container_runtime"`
	AutoScale types.Bool `tfsdk:"auto_scale"`
	ScaleMin types.Int64 `tfsdk:"scale_min"`
	ScaleMax types.Int64 `tfsdk:"scale_max"`
	NetworkName types.String `tfsdk:"network_name"`
	NetworkID types.String `tfsdk:"network_id"`
	//Kv []struct {
	//	Name types.String `tfsdk:"name"`
	//} `tfsdk:"kv"`
	//VGpuID interface{} `tfsdk:"vGpuId"`
	//IsDisplayGPU bool `tfsdk:"isDisplayGPU"`
	//IsCreate types.Bool `tfsdk:"is_create"`
	//IsScale types.Bool `tfsdk:"is_scale"`
	//IsOthers types.Bool `tfsdk:"is_others"`
	IsEnableAutoRepair types.Bool `tfsdk:"is_enable_auto_repair"`
	//DriverInstallationType types.String `tfsdk:"driver_installation_type"`
	//GpuDriverVersion types.String `tfsdk:"gpu_driver_version"`
}
// managedKubernetesEngineJson is the request payload of the cluster create
// API. Key casing is mixed (snake_case and camelCase) because it mirrors the
// backend's JSON contract verbatim.
type managedKubernetesEngineJson struct {
	ClusterName string `json:"cluster_name"`
	NetworkID string `json:"network_id"`
	K8SVersion string `json:"k8s_version"`
	OsVersion interface{} `json:"os_version"`
	Purpose string `json:"purpose"`
	Pools []*managedKubernetesEnginePoolJson `json:"pools"`
	PodNetwork string `json:"pod_network"`
	PodPrefix string `json:"pod_prefix"`
	ServiceNetwork string `json:"service_network"`
	ServicePrefix string `json:"service_prefix"`
	K8SMaxPod int64 `json:"k8s_max_pod"`
	NetworkNodePrefix int64 `json:"network_node_prefix"`
	RangeIPLbStart string `json:"range_ip_lb_start"`
	RangeIPLbEnd string `json:"range_ip_lb_end"`
	LoadBalancerType string `json:"loadBalancerType"`
	NetworkType string `json:"network_type"`
	SSHKey interface{} `json:"sshKey"`
	TypeCreate string `json:"type_create"`
	//RegionId string `json:"region_id"`
}

// managedKubernetesEnginePoolJson is the per-pool payload used by both the
// create and the configure-worker requests; defaults for the flag fields are
// stamped in fillJson.
type managedKubernetesEnginePoolJson struct {
	WorkerPoolID *string `json:"worker_pool_id"`
	StorageProfile string `json:"storage_profile"`
	WorkerType string `json:"worker_type"`
	WorkerDiskSize int64 `json:"worker_disk_size"`
	ContainerRuntime string `json:"container_runtime"`
	AutoScale bool `json:"auto_scale"`
	ScaleMin int64 `json:"scale_min"`
	ScaleMax int64 `json:"scale_max"`
	NetworkName string `json:"network_name"`
	NetworkID string `json:"network_id"`
	Kv []struct {
		Name string `json:"name"`
	} `json:"kv"`
	VGpuID interface{} `json:"vGpuId"`
	IsDisplayGPU bool `json:"isDisplayGPU"`
	IsCreate bool `json:"isCreate"`
	IsScale bool `json:"isScale"`
	IsOthers bool `json:"isOthers"`
	IsEnableAutoRepair bool `json:"isEnableAutoRepair"`
	DriverInstallationType string `json:"driverInstallationType"`
	GpuDriverVersion string `json:"gpuDriverVersion"`
}
// managedKubernetesEngineCreateResponse is the create API's response; the
// cluster slug used for reads is "<cluster_name>-<cluster_id>".
type managedKubernetesEngineCreateResponse struct {
	Error bool `json:"error"`
	Kpi struct {
		ClusterId string `json:"cluster_id"`
		ClusterName string `json:"cluster_name"`
	} `json:"kpi"`
}

// managedKubernetesEngineReadResponse is the get API's envelope; Mess holds
// error messages when Error is true.
type managedKubernetesEngineReadResponse struct {
	Data managedKubernetesEngineData `json:"data"`
	Mess []string `json:"mess"`
	Error bool `json:"error"`
}

// managedKubernetesEngineOsVersionResponse lists the selectable OS versions
// keyed by their k8s version string (Value).
type managedKubernetesEngineOsVersionResponse struct {
	Error bool `json:"error"`
	Data []struct {
		Label string `json:"label"`
		OsVersion interface{} `json:"os_version"`
		Value string `json:"value"`
	} `json:"data"`
}

// managedKubernetesEngineData mirrors the (Gardener-style) shoot object
// returned by the get API.
type managedKubernetesEngineData struct {
	Status managedKubernetesEngineDataStatus `json:"status"`
	Metadata managedKubernetesEngineDataMetadata `json:"metadata"`
	Spec managedKubernetesEngineDataSpec `json:"spec"`
}

// managedKubernetesEngineDataStatus carries the progress of the last cluster
// operation.
type managedKubernetesEngineDataStatus struct {
	LastOperation struct {
		Progress int `json:"progress"`
		State string `json:"state"`
		Type string `json:"type"`
	} `json:"lastOperation"`
}

// managedKubernetesEngineDataMetadata holds the cluster name plus labels;
// per-pool flavor IDs are stored under "fptcloud.com/flavor_pool_<name>".
type managedKubernetesEngineDataMetadata struct {
	Name string `json:"name"`
	Labels map[string]string `json:"labels"`
}

// managedKubernetesEngineDataSpec is the subset of the shoot spec this
// provider reads back into state.
type managedKubernetesEngineDataSpec struct {
	Kubernetes struct {
		Kubelet struct {
			MaxPods int `json:"maxPods"`
		} `json:"kubelet"`
		Version string `json:"version"`
	} `json:"kubernetes"`
	LoadBalancerType string `json:"loadBalancerType"`
	Networking struct {
		Nodes string `json:"nodes"`
		Pods string `json:"pods"`
		Services string `json:"services"`
		Type string `json:"type"`
	} `json:"networking"`

	SeedSelector struct {
		MatchLabels struct {
			GardenerCloudPurpose string `json:"gardener_cloud_purpose"`
		} `json:"matchLabels"`
	} `json:"seedSelector"`

	Provider struct {
		InfrastructureConfig struct {
			Networks struct {
				Id string `json:"id"`
				LbIPRangeEnd string `json:"lbIpRangeEnd"`
				LbIPRangeStart string `json:"lbIpRangeStart"`
				Workers string `json:"workers"`
			} `json:"networks"`
		} `json:"infrastructureConfig"`
		Workers []*managedKubernetesEngineDataWorker `json:"workers"`
	} `json:"provider"`
}
+
// managedKubernetesEngineDataWorker mirrors a single worker pool as returned
// by the get API; only a subset of fields is read back into Terraform state.
type managedKubernetesEngineDataWorker struct {
	Annotations []map[string]string `json:"annotations"`
	Kubernetes struct {
		Kubelet struct {
			ContainerLogMaxFiles int `json:"containerLogMaxFiles"`
			ContainerLogMaxSize string `json:"containerLogMaxSize"`
			EvictionHard struct {
				ImageFSAvailable string `json:"imageFSAvailable"`
				ImageFSInodesFree string `json:"imageFSInodesFree"`
				MemoryAvailable string `json:"memoryAvailable"`
				NodeFSAvailable string `json:"nodeFSAvailable"`
				NodeFSInodesFree string `json:"nodeFSInodesFree"`
			} `json:"evictionHard"`
			FailSwapOn bool `json:"failSwapOn"`
			KubeReserved struct {
				CPU string `json:"cpu"`
				EphemeralStorage string `json:"ephemeralStorage"`
				Memory string `json:"memory"`
				Pid string `json:"pid"`
			} `json:"kubeReserved"`
			MaxPods int `json:"maxPods"`
			SystemReserved struct {
				CPU string `json:"cpu"`
				EphemeralStorage string `json:"ephemeralStorage"`
				Memory string `json:"memory"`
				Pid string `json:"pid"`
			} `json:"systemReserved"`
		} `json:"kubelet"`
		Version string `json:"version"`
	} `json:"kubernetes"`
	Labels []interface{} `json:"labels"`
	Machine struct {
		Image struct {
			DriverInstallationType *string `json:"driverInstallationType"`
			GpuDriverVersion *string `json:"gpuDriverVersion"`
			Name string `json:"name"`
			Version string `json:"version"`
		} `json:"image"`
		Type string `json:"type"`
	} `json:"machine"`
	MaxSurge int `json:"maxSurge"`
	MaxUnavailable int `json:"maxUnavailable"`
	Maximum int `json:"maximum"`
	Minimum int `json:"minimum"`
	Name string `json:"name"`
	ProviderConfig struct {
		APIVersion string `json:"apiVersion"`
		Kind string `json:"kind"`
		NetworkName string `json:"networkName"`
		ServerGroup interface{} `json:"serverGroup"`
		UserName string `json:"userName"`
		VGpuID interface{} `json:"vGpuId"`
	} `json:"providerConfig"`
	SystemComponents struct {
		Allow bool `json:"allow"`
	} `json:"systemComponents"`
	Taints []interface{} `json:"taints"`
	Volume struct {
		Size string `json:"size"`
		Type string `json:"type"`
	} `json:"volume"`
	Zones []string `json:"zones"`
}
+
+func (w *managedKubernetesEngineDataWorker) AutoRepair() bool {
+ autoRepair := false
+
+ for _, item := range w.Annotations {
+
+ if label, ok := item["worker.fptcloud.com/node-auto-repair"]; ok {
+ autoRepair = label == "true"
+ }
+ }
+
+ return autoRepair
+}
+
// managedKubernetesEngineEditWorker is the payload of the
// configure-worker-cluster PATCH endpoint; CurrentNetworking must echo the
// cluster's current node CIDR (see diff).
type managedKubernetesEngineEditWorker struct {
	Pools []*managedKubernetesEnginePoolJson `json:"pools"`
	K8sVersion string `json:"k8s_version"`
	TypeConfigure string `json:"type_configure"`
	CurrentNetworking string `json:"currentNetworking"`
}
diff --git a/fptcloud/mfke/util_network.go b/fptcloud/mfke/util_network.go
new file mode 100644
index 0000000..b981294
--- /dev/null
+++ b/fptcloud/mfke/util_network.go
@@ -0,0 +1,40 @@
+package fptcloud_mfke
+
+import (
+ "context"
+ "errors"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "strings"
+ fptcloud_subnet "terraform-provider-fptcloud/fptcloud/subnet"
+)
+
+func getNetworkIdByPlatform(ctx context.Context, client fptcloud_subnet.SubnetService, vpcId, platform string, w *managedKubernetesEngineDataWorker, data *managedKubernetesEngineData) (string, error) {
+	// VMW clusters reference their subnet by name; other platforms carry the network ID directly.
+	if strings.ToLower(platform) != "vmw" {
+		return getNetworkId(ctx, client, vpcId, "", data.Spec.Provider.InfrastructureConfig.Networks.Id)
+	}
+	return getNetworkId(ctx, client, vpcId, w.ProviderConfig.NetworkName, "")
+}
+
+// getNetworkId resolves a subnet to its ID by exactly one selector: name or ID.
+func getNetworkId(ctx context.Context, client fptcloud_subnet.SubnetService, vpcId string, networkName string, networkId string) (string, error) {
+	if networkName != "" && networkId != "" {
+		return "", errors.New("only specify network name or id")
+	}
+	msg := "Resolving network ID for VPC " + vpcId + ", network ID " + networkId
+	if networkName != "" {
+		msg = "Resolving network ID for VPC " + vpcId + ", network " + networkName
+	}
+	tflog.Info(ctx, msg)
+
+	subnet, err := client.FindSubnetByName(fptcloud_subnet.FindSubnetDTO{
+		NetworkName: networkName,
+		NetworkID:   networkId,
+		VpcId:       vpcId,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	return subnet.ID, nil
+}
diff --git a/fptcloud/mfke/util_validations.go b/fptcloud/mfke/util_validations.go
new file mode 100644
index 0000000..54e54f1
--- /dev/null
+++ b/fptcloud/mfke/util_validations.go
@@ -0,0 +1,85 @@
+package fptcloud_mfke
+
+import (
+ "fmt"
+ diag2 "github.com/hashicorp/terraform-plugin-framework/diag"
+ "strings"
+)
+
+// validatePool rejects empty pool lists, the reserved name "worker-new",
+// and duplicate worker group names.
+func validatePool(pools []*managedKubernetesEnginePool) *diag2.ErrorDiagnostic {
+	if len(pools) == 0 {
+		d := diag2.NewErrorDiagnostic("Invalid configuration", "At least a worker pool must be configured")
+		return &d
+	}
+	seen := map[string]bool{}
+	for _, p := range pools {
+		name := p.WorkerPoolID.ValueString()
+		if name == "worker-new" {
+			d := diag2.NewErrorDiagnostic("Invalid worker group name", "Worker group name \"worker-new\" is reserved")
+			return &d
+		}
+
+		if seen[name] {
+			d := diag2.NewErrorDiagnostic("Duplicate worker group name", "Worker group name "+name+" is used twice")
+			return &d
+		}
+
+		seen[name] = true
+	}
+	return nil
+}
+
+// validateNetwork enforces the platform-specific network-ID placement rules:
+// OSP requires one global network ID mirrored by every pool; VMW forbids a
+// global one (each worker group specifies its own).
+func validateNetwork(state *managedKubernetesEngine, platform string) *diag2.ErrorDiagnostic {
+	globalNetwork := state.NetworkID.ValueString()
+	if strings.ToLower(platform) != "osp" {
+		if globalNetwork != "" {
+			d := diag2.NewErrorDiagnostic(
+				"Global network ID is not supported",
+				"VPC platform is VMW. Network ID must be specified per worker group, not globally",
+			)
+			return &d
+		}
+		return nil
+	}
+
+	if globalNetwork == "" {
+		d := diag2.NewErrorDiagnostic(
+			"Global network ID must be specified",
+			"VPC platform is OSP. Network ID must be specified globally and each worker group's network ID must match",
+		)
+		return &d
+	}
+	for _, p := range state.Pools {
+		if p.NetworkID.ValueString() != globalNetwork {
+			d := diag2.NewErrorDiagnostic(
+				"Worker network ID mismatch",
+				fmt.Sprintf("VPC platform is OSP. Network ID of worker group \"%s\" must match global one", p.WorkerPoolID.ValueString()),
+			)
+			return &d
+		}
+	}
+	return nil
+}
+
+// validatePoolNames collects every worker pool name in declaration order and
+// fails when a name is declared more than once (nil slice for empty input).
+func validatePoolNames(pools []*managedKubernetesEnginePool) ([]string, error) {
+	var poolNames []string
+
+	// Fix: the original ranged with a loop variable named `pool`, shadowing
+	// the parameter of the same name; also pre-size the dedup map.
+	seen := make(map[string]bool, len(pools))
+	for _, p := range pools {
+		name := p.WorkerPoolID.ValueString()
+		if seen[name] {
+			return nil, fmt.Errorf("pool %s already exists", name)
+		}
+		seen[name] = true
+		poolNames = append(poolNames, name)
+	}
+
+	return poolNames, nil
+}
diff --git a/fptcloud/provider_tf6.go b/fptcloud/provider_tf6.go
new file mode 100644
index 0000000..0f62ca7
--- /dev/null
+++ b/fptcloud/provider_tf6.go
@@ -0,0 +1,167 @@
+package fptcloud
+
+import (
+ "context"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/provider"
+ "github.com/hashicorp/terraform-plugin-framework/provider/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "os"
+ common "terraform-provider-fptcloud/commons"
+ fptcloud_database "terraform-provider-fptcloud/fptcloud/database"
+ fptcloud_dfke "terraform-provider-fptcloud/fptcloud/dfke"
+ fptcloud_edge_gateway "terraform-provider-fptcloud/fptcloud/edge_gateway"
+ fptcloud_mfke "terraform-provider-fptcloud/fptcloud/mfke"
+)
+
+var (
+	_ provider.Provider = &xplatProvider{} // compile-time check that xplatProvider satisfies provider.Provider
+)
+
+type xplatProviderModel struct {
+	Region      types.String `tfsdk:"region"`       // e.g. "VN/HAN" or "VN/SGN"; falls back to FPTCLOUD_REGION
+	Token       types.String `tfsdk:"token"`        // API token; falls back to FPTCLOUD_TOKEN
+	TenantName  types.String `tfsdk:"tenant_name"`  // tenant to operate on; falls back to FPTCLOUD_TENANT_NAME
+	ApiEndpoint types.String `tfsdk:"api_endpoint"` // API base URL; falls back to FPTCLOUD_API_URL, then ProdAPI
+}
+
+type xplatProvider struct {
+	// version is set to the provider version on release, "dev" when the
+	// provider is built and ran locally, and "test" when running acceptance
+	// testing.
+	version string
+}
+
+// NewXplatProvider builds a constructor for the plugin-framework provider,
+// stamping the given version onto every instance it creates.
+func NewXplatProvider(version string) func() provider.Provider {
+	return func() provider.Provider {
+		return &xplatProvider{version: version}
+	}
+}
+
+func (x *xplatProvider) Metadata(ctx context.Context, request provider.MetadataRequest, response *provider.MetadataResponse) {
+	response.Version = x.version // provider build version ("dev" for local builds)
+	response.TypeName = "fptcloud"
+}
+
+func (x *xplatProvider) Schema(ctx context.Context, request provider.SchemaRequest, response *provider.SchemaResponse) { // declares provider-level configuration attributes
+	response.Schema = schema.Schema{
+		Description: "", // NOTE(review): provider description is empty — consider documenting the provider here
+		Attributes: map[string]schema.Attribute{ // every attribute is Optional: values may instead come from env vars (see Configure)
+			"region": schema.StringAttribute{
+				Description: "The region to use (VN/HAN | VN/SGN)",
+				Optional:    true,
+			},
+
+			"token": schema.StringAttribute{
+				Description: "This is the Fpt cloud API token. Alternatively, this can also be specified using `FPTCLOUD_TOKEN` environment variable.",
+				Optional:    true,
+			},
+
+			"tenant_name": schema.StringAttribute{
+				Description: "The tenant name to use",
+				Optional:    true,
+			},
+
+			"api_endpoint": schema.StringAttribute{
+				Description: "The URL to use",
+				Optional:    true,
+			},
+		},
+	}
+}
+
+func (x *xplatProvider) Configure(ctx context.Context, request provider.ConfigureRequest, response *provider.ConfigureResponse) { // builds the shared API client handed to resources and data sources
+	tflog.Info(ctx, "Configuring FPTCloud client")
+	var config xplatProviderModel
+
+	diags := request.Config.Get(ctx, &config)
+	response.Diagnostics.Append(diags...)
+	if response.Diagnostics.HasError() {
+		return
+	}
+	token := os.Getenv("FPTCLOUD_TOKEN") // env vars are the defaults; explicit config below overrides them
+	region := os.Getenv("FPTCLOUD_REGION")
+	tenantName := os.Getenv("FPTCLOUD_TENANT_NAME")
+	apiEndpoint := os.Getenv("FPTCLOUD_API_URL")
+
+	if !config.Token.IsNull() {
+		token = config.Token.ValueString()
+	}
+
+	if !config.Region.IsNull() {
+		region = config.Region.ValueString()
+	}
+
+	if !config.TenantName.IsNull() {
+		tenantName = config.TenantName.ValueString()
+	}
+
+	if !config.ApiEndpoint.IsNull() {
+		apiEndpoint = config.ApiEndpoint.ValueString()
+	}
+
+	if apiEndpoint == "" {
+		apiEndpoint = ProdAPI // neither config nor env supplied an endpoint: fall back to production
+	}
+
+	if token == "" {
+		response.Diagnostics.AddAttributeError(
+			path.Root("token"),
+			"Missing token",
+			"Token must be specified to authenticate to provision resources",
+		)
+	}
+
+	if response.Diagnostics.HasError() {
+		return
+	}
+
+	ctx = tflog.SetField(ctx, "token", token)
+	ctx = tflog.MaskFieldValuesWithFieldKeys(ctx, "token") // keep the raw token out of log output
+	tflog.Debug(ctx, "Creating FPTCloud client")
+
+	client, err := common.NewClientWithURL(token, apiEndpoint, region, tenantName)
+
+	if err != nil {
+		response.Diagnostics.AddError("Error creating client", err.Error())
+		return
+	}
+
+	userAgent := &common.Component{
+		Name:    "terraform-provider-fptcloud",
+		Version: ProviderVersion, // NOTE(review): uses package-level ProviderVersion, not x.version — confirm intended
+	}
+	client.SetUserAgent(userAgent)
+
+	response.DataSourceData = client // the same client instance serves both data sources and resources
+	response.ResourceData = client
+
+	tflog.Info(ctx, "Configured FPTCloud client", map[string]any{
+		"success":      true,
+		"api_endpoint": apiEndpoint,
+		"tenant_name":  tenantName,
+	})
+}
+
+func (x *xplatProvider) DataSources(ctx context.Context) []func() datasource.DataSource {
+	return []func() datasource.DataSource{ // data sources registered on the plugin-framework side of the provider
+		fptcloud_dfke.NewDataSourceDedicatedKubernetesEngine,
+		fptcloud_mfke.NewDataSourceManagedKubernetesEngine,
+		fptcloud_edge_gateway.NewDataSourceEdgeGateway,
+	}
+}
+
+func (x *xplatProvider) Resources(ctx context.Context) []func() resource.Resource {
+	return []func() resource.Resource{ // resources registered on the plugin-framework side of the provider
+		fptcloud_dfke.NewResourceDedicatedKubernetesEngine,
+		fptcloud_dfke.NewResourceDedicatedKubernetesEngineState,
+		fptcloud_mfke.NewResourceManagedKubernetesEngine,
+		fptcloud_database.NewResourceDatabase,
+		fptcloud_database.NewResourceDatabaseStatus,
+	}
+}
diff --git a/fptcloud/subnet/datasource_subnet.go b/fptcloud/subnet/datasource_subnet.go
index d3b4571..55b83b0 100644
--- a/fptcloud/subnet/datasource_subnet.go
+++ b/fptcloud/subnet/datasource_subnet.go
@@ -41,6 +41,11 @@ func subnetSchema() map[string]*schema.Schema {
Computed: true,
Description: "The id of the subnet",
},
+ "network_id": {
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The network id of the subnet",
+ },
"name": {
Type: schema.TypeString,
Computed: true,
@@ -75,6 +80,7 @@ func flattenSubnet(subnet, _ interface{}, _ map[string]interface{}) (map[string]
flattened := map[string]interface{}{}
flattened["id"] = s.ID
+ flattened["network_id"] = s.NetworkID
flattened["name"] = s.Name
flattened["network_name"] = s.NetworkName
flattened["gateway"] = s.Gateway
diff --git a/fptcloud/subnet/subnet_service.go b/fptcloud/subnet/subnet_service.go
index 5e2e5ec..8d3220f 100644
--- a/fptcloud/subnet/subnet_service.go
+++ b/fptcloud/subnet/subnet_service.go
@@ -32,6 +32,7 @@ type SubnetResponseDto struct {
type Subnet struct {
ID string `json:"id"`
Name string `json:"name"`
+ NetworkID string `json:"network_id"`
NetworkName string `json:"network_name"`
Gateway string `json:"gateway"`
VpcId string `json:"vpc_id"`
diff --git a/go.mod b/go.mod
index 73025aa..8f74c3a 100644
--- a/go.mod
+++ b/go.mod
@@ -3,6 +3,10 @@ module terraform-provider-fptcloud
go 1.21
require (
+ github.com/hashicorp/terraform-plugin-framework v1.10.0
+ github.com/hashicorp/terraform-plugin-go v0.23.0
+ github.com/hashicorp/terraform-plugin-log v0.9.0
+ github.com/hashicorp/terraform-plugin-mux v0.16.0
github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0
github.com/stretchr/testify v1.8.2
)
@@ -41,8 +45,6 @@ require (
github.com/hashicorp/terraform-exec v0.21.0 // indirect
github.com/hashicorp/terraform-json v0.22.1 // indirect
github.com/hashicorp/terraform-plugin-docs v0.19.4 // indirect
- github.com/hashicorp/terraform-plugin-go v0.23.0 // indirect
- github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect
github.com/hashicorp/terraform-registry-address v0.2.3 // indirect
github.com/hashicorp/terraform-svchost v0.1.1 // indirect
github.com/hashicorp/yamux v0.1.1 // indirect
diff --git a/go.sum b/go.sum
index 1377a32..6733222 100644
--- a/go.sum
+++ b/go.sum
@@ -1,3 +1,5 @@
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/Kunde21/markdownfmt/v3 v3.1.0 h1:KiZu9LKs+wFFBQKhrZJrFZwtLnCCWJahL+S+E/3VnM0=
@@ -8,8 +10,8 @@ github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7Y
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
-github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0=
-github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
+github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
+github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg=
github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE=
@@ -28,14 +30,26 @@ github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp
github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU=
github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
+github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
+github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
+github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
+github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow=
+github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys=
+github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY=
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
@@ -50,7 +64,6 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/hashicorp/cli v1.1.6 h1:CMOV+/LJfL1tXCOKrgAX0uRKnzjj/mpmqNXloRSy2K8=
github.com/hashicorp/cli v1.1.6/go.mod h1:MPon5QYlgjjo0BSoAiN0ESeT5fRzDjVRp+uioJ0piz4=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -73,28 +86,26 @@ github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/C
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs=
-github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0=
github.com/hashicorp/hc-install v0.7.0 h1:Uu9edVqjKQxxuD28mR5TikkKDd/p55S8vzPC1659aBk=
github.com/hashicorp/hc-install v0.7.0/go.mod h1:ELmmzZlGnEcqoUMKUuykHaPCIR1sYLYX+KSggWSKZuA=
github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc=
github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4=
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo=
-github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw=
github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ=
github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg=
-github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U=
-github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk=
github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec=
github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A=
github.com/hashicorp/terraform-plugin-docs v0.19.4 h1:G3Bgo7J22OMtegIgn8Cd/CaSeyEljqjH3G39w28JK4c=
github.com/hashicorp/terraform-plugin-docs v0.19.4/go.mod h1:4pLASsatTmRynVzsjEhbXZ6s7xBlUw/2Kt0zfrq8HxA=
+github.com/hashicorp/terraform-plugin-framework v1.10.0 h1:xXhICE2Fns1RYZxEQebwkB2+kXouLC932Li9qelozrc=
+github.com/hashicorp/terraform-plugin-framework v1.10.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM=
github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co=
github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ=
github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0=
github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow=
+github.com/hashicorp/terraform-plugin-mux v0.16.0 h1:RCzXHGDYwUwwqfYYWJKBFaS3fQsWn/ZECEiW7p2023I=
+github.com/hashicorp/terraform-plugin-mux v0.16.0/go.mod h1:PF79mAsPc8CpusXPfEVa4X8PtkB+ngWoiUClMrNZlYo=
github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 h1:qHprzXy/As0rxedphECBEQAh3R4yp6pKksKHcqZx5G8=
github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0/go.mod h1:H+8tjs9TjV2w57QFVSMBQacf8k/E1XwLXGCARgViC6A=
github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI=
@@ -108,8 +119,12 @@ github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c=
github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo=
+github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
@@ -142,6 +157,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
+github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo=
@@ -149,9 +166,13 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=
+github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
@@ -173,6 +194,8 @@ github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IU
github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
+github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
+github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/goldmark v1.7.1 h1:3bajkSilaCbjdKVsKdZjZCLBNPL9pYzrCakKaf4U49U=
github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
@@ -254,6 +277,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
+gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/main.go b/main.go
index af017c8..88eed1c 100644
--- a/main.go
+++ b/main.go
@@ -1,12 +1,20 @@
package main
import (
+ "context"
"flag"
- "github.com/hashicorp/terraform-plugin-sdk/v2/plugin"
+ "github.com/hashicorp/terraform-plugin-framework/providerserver"
+ "github.com/hashicorp/terraform-plugin-go/tfprotov5"
+ "github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server"
+ "github.com/hashicorp/terraform-plugin-mux/tf5muxserver"
+ "log"
"terraform-provider-fptcloud/fptcloud"
)
+//go:generate go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs generate -provider-name terraform-provider-fptcloud
+
func main() {
+ ctx := context.Background()
var debugMode bool = true
flag.BoolVar(
@@ -17,12 +25,29 @@ func main() {
)
flag.Parse()
- opts := &plugin.ServeOpts{ProviderFunc: fptcloud.Provider}
+ providers := []func() tfprotov5.ProviderServer{
+ providerserver.NewProtocol5(fptcloud.NewXplatProvider("dev")()),
+ fptcloud.Provider().GRPCProvider,
+ }
+
+ muxServer, err := tf5muxserver.NewMuxServer(ctx, providers...)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var serveOpts []tf5server.ServeOpt
if debugMode {
- opts.Debug = true
- opts.ProviderAddr = "github.com/terraform-providers/fptcloud"
+ serveOpts = append(serveOpts, tf5server.WithManagedDebug())
}
- plugin.Serve(opts)
+ err = tf5server.Serve(
+ "github.com/terraform-providers/fptcloud",
+ muxServer.ProviderServer,
+ serveOpts...,
+ )
+
+ if err != nil {
+ log.Fatal(err)
+ }
}