diff --git a/commons/api_path.go b/commons/api_path.go index df681f5..3e643f9 100644 --- a/commons/api_path.go +++ b/commons/api_path.go @@ -80,11 +80,11 @@ var ApiPath = struct { GetBucketVersioning func(vpcId, s3ServiceId, bucketName string) string PutBucketVersioning func(vpcId, s3ServiceId, bucketName string) string // Bucket Lifecycle - GetBucketLifecycle func(vpcId, s3ServiceId, bucketName, page, pageSize string) string + GetBucketLifecycle func(vpcId, s3ServiceId, bucketName string, page, pageSize int) string PutBucketLifecycle func(vpcId, s3ServiceId, bucketName string) string DeleteBucketLifecycle func(vpcId, s3ServiceId, bucketName string) string // Bucket CORS - GetBucketCORS func(vpcId, s3ServiceId, bucketName string) string + GetBucketCORS func(vpcId, s3ServiceId, bucketName string, page, pageSize int) string PutBucketCORS func(vpcId, s3ServiceId, bucketName string) string CreateBucketCors func(vpcId, s3ServiceId, bucketName string) string // Bucket ACL @@ -308,8 +308,8 @@ var ApiPath = struct { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/delete-config", vpcId, s3ServiceId, bucketName) }, // Bucket Lifecycle - GetBucketLifecycle: func(vpcId, s3ServiceId, bucketName, page, pageSize string) string { - return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/lifecycles?page=%s&page_size=%s", vpcId, s3ServiceId, bucketName, page, pageSize) + GetBucketLifecycle: func(vpcId, s3ServiceId, bucketName string, page, pageSize int) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/lifecycles?page=%d&page_size=%d", vpcId, s3ServiceId, bucketName, page, pageSize) }, PutBucketLifecycle: func(vpcId, s3ServiceId, bucketName string) string { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/create-bucket-lifecycle-configuration`", vpcId, s3ServiceId, bucketName) @@ -318,8 +318,8 @@ var ApiPath = struct { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/delete-bucket-lifecycle-configuration", vpcId, s3ServiceId, bucketName) }, // Bucket 
CORS - GetBucketCORS: func(vpcId, s3ServiceId, bucketName string) string { - return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/cors", vpcId, s3ServiceId, bucketName) + GetBucketCORS: func(vpcId, s3ServiceId, bucketName string, page, pageSize int) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/cors?page=%d&page_size=%d", vpcId, s3ServiceId, bucketName, page, pageSize) }, PutBucketCORS: func(vpcId, s3ServiceId, bucketName string) string { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/put-bucket-cors", vpcId, s3ServiceId, bucketName) diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_access_keys.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_access_keys.tf new file mode 100644 index 0000000..0854940 --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_access_keys.tf @@ -0,0 +1,8 @@ +data "fptcloud_object_storage_access_key" "keys" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" +} +// for raw data and all access keys from region_name will be listed +output "access_key" { + value = data.fptcloud_object_storage_access_key.keys +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_acl.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_acl.tf new file mode 100644 index 0000000..673ff5b --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_acl.tf @@ -0,0 +1,9 @@ +data "fptcloud_object_storage_bucket_acl" "example_bucket_acl" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" + bucket_name = "your_bucket_name" +} + +output "bucket_acl" { + value = data.fptcloud_object_storage_bucket_acl.example_bucket_acl +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_cors.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_cors.tf new file mode 100644 index 0000000..b69910c --- /dev/null +++ 
b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_cors.tf @@ -0,0 +1,11 @@ +data "fptcloud_object_storage_bucket_cors" "example_bucket_cors" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" + bucket_name = "your_bucket_name" + page = 1 + page_size = 100 +} + +output "bucket_cors" { + value = data.fptcloud_object_storage_bucket_cors.example_bucket_cors +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_lifecycle.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_lifecycle.tf new file mode 100644 index 0000000..2fd519e --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_lifecycle.tf @@ -0,0 +1,11 @@ +data "fptcloud_object_storage_lifecycle" "example_bucket_lifecycle" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" + bucket_name = "your_bucket_name" + page = 1 + page_size = 100 +} + +output "bucket_lifecycle" { + value = data.fptcloud_object_storage_lifecycle.example_bucket_lifecycle +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_policy.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_policy.tf new file mode 100644 index 0000000..7d2f678 --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_policy.tf @@ -0,0 +1,9 @@ +data "fptcloud_object_storage_bucket_policy" "example_bucket_policy" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" + bucket_name = "your_bucket_name" +} + +output "bucket_policy" { + value = data.fptcloud_object_storage_bucket_policy.example_bucket_policy.policy +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_static_website.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_static_website.tf new file mode 100644 index 0000000..ae1846e --- /dev/null +++ 
b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_static_website.tf @@ -0,0 +1,9 @@ +data "fptcloud_object_storage_static_website" "example_bucket_static_website" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" + bucket_name = "your_bucket_name" +} + +output "bucket_static_website" { + value = data.fptcloud_object_storage_static_website.example_bucket_static_website +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_versioning.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_versioning.tf new file mode 100644 index 0000000..f32ab2a --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_versioning.tf @@ -0,0 +1,9 @@ +data "fptcloud_object_storage_bucket_versioning" "example_bucket_versioning" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" + bucket_name = "your_bucket_name" +} + +output "bucket_versioning" { + value = data.fptcloud_object_storage_bucket_versioning.example_bucket_versioning +} diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_acl.go b/fptcloud/object-storage/datasource_object_storage_bucket_acl.go new file mode 100644 index 0000000..49ea7b2 --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage_bucket_acl.go @@ -0,0 +1,159 @@ +package fptcloud_object_storage + +import ( + "context" + "fmt" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceBucketAcl() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceBucketAclRead, + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC ID", + }, + "bucket_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the bucket to config the ACL", + }, + "region_name": { + 
Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, + "canned_acl": { + Type: schema.TypeString, + Computed: true, + Description: "The Access Control List (ACL) status of the bucket which can be one of the following values: private, public-read, default is private", + }, + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "The status after configuring the bucket ACL", + }, + "bucket_acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "owner": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "grants": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "grantee": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "permission": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceBucketAclRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + bucketName := d.Get("bucket_name").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", 
regionName)) + } + r := service.GetBucketAcl(vpcId, s3ServiceDetail.S3ServiceId, bucketName) + if !r.Status { + return diag.Errorf("failed to get bucket ACL for bucket %s", bucketName) + } + bucketAcl := []interface{}{ + map[string]interface{}{ + "owner": []interface{}{ + map[string]interface{}{ + "display_name": r.Owner.DisplayName, + "id": r.Owner.ID, + }, + }, + "grants": func() []interface{} { + grants := make([]interface{}, len(r.Grants)) + for i, grant := range r.Grants { + grants[i] = map[string]interface{}{ + "grantee": []interface{}{ + map[string]interface{}{ + "display_name": grant.Grantee.DisplayName, + "id": grant.Grantee.ID, + "type": grant.Grantee.Type, + }, + }, + "permission": grant.Permission, + } + } + return grants + }(), + }, + } + d.SetId(bucketName) + if err := d.Set("bucket_acl", bucketAcl); err != nil { + d.SetId("") + return diag.FromErr(err) + } + if err := d.Set("canned_acl", r.CannedACL); err != nil { + d.SetId("") + return diag.FromErr(err) + } + if err := d.Set("status", r.Status); err != nil { + d.SetId("") + return diag.FromErr(err) + } + + return nil +} diff --git a/fptcloud/object-storage/datasource_object_storage.go b/fptcloud/object-storage/datasource_object_storage.go index 9e17fca..e29c600 100644 --- a/fptcloud/object-storage/datasource_object_storage.go +++ b/fptcloud/object-storage/datasource_object_storage.go @@ -67,17 +67,24 @@ type CorsRule struct { ID string `json:"ID,omitempty"` AllowedOrigins []string `json:"AllowedOrigins"` AllowedMethods []string `json:"AllowedMethods"` - MaxAgeSeconds int `json:"MaxAgeSeconds,omitempty"` ExposeHeaders []string `json:"ExposeHeaders,omitempty"` AllowedHeaders []string `json:"AllowedHeaders,omitempty"` + MaxAgeSeconds int `json:"MaxAgeSeconds"` } type BucketCors struct { CorsRules []CorsRule `json:"CORSRules"` } type BucketCorsResponse struct { - Status bool `json:"status"` - Total int `json:"total"` - CorsRules []CorsRule `json:"cors_rules"` + Status bool `json:"status"` + CorsRules 
[]struct { + ID string `json:"ID"` + AllowedHeaders []string `json:"AllowedHeaders,omitempty"` + AllowedMethods []string `json:"AllowedMethods"` + AllowedOrigins []string `json:"AllowedOrigins"` + ExposeHeaders []string `json:"ExposeHeaders,omitempty"` + MaxAgeSeconds int `json:"MaxAgeSeconds"` + } `json:"cors_rules"` + Total int `json:"total"` } type BucketPolicyResponse struct { @@ -98,6 +105,10 @@ type Statement struct { type BucketVersioningRequest struct { Status string `json:"status"` // "Enabled" or "Suspended" } +type BucketVersioningResponse struct { + Status bool `json:"status"` + Config string `json:"config"` // "Enabled" or "Suspended" +} type BucketAclResponse struct { Status bool `json:"status"` @@ -190,16 +201,20 @@ type BucketLifecycleResponse struct { Status bool `json:"status"` Rules []struct { Expiration struct { - Days int `json:"Days"` + ExpiredObjectDeleteMarker bool `json:"ExpiredObjectDeleteMarker"` } `json:"Expiration"` ID string `json:"ID"` Filter struct { Prefix string `json:"Prefix"` - } `json:"Filter"` - Status string `json:"Status"` + } `json:"Filter,omitempty"` + Status string `json:"Status"` + NoncurrentVersionExpiration struct { + NoncurrentDays int `json:"NoncurrentDays"` + } `json:"NoncurrentVersionExpiration"` AbortIncompleteMultipartUpload struct { DaysAfterInitiation int `json:"DaysAfterInitiation"` } `json:"AbortIncompleteMultipartUpload"` + Prefix string `json:"Prefix,omitempty"` } `json:"rules"` Total int `json:"total"` } @@ -242,11 +257,11 @@ type ObjectStorageService interface { // CORS configuration PutBucketCors(bucketName, vpcId, s3ServiceId string, cors CorsRule) (CommonResponse, error) UpdateBucketCors(bucketName, vpcId, s3ServiceId string, cors BucketCors) (CommonResponse, error) - GetBucketCors(vpcId, s3ServiceId, bucketName string) (*BucketCors, error) + GetBucketCors(vpcId, s3ServiceId, bucketName string, page, pageSize int) (*BucketCorsResponse, error) // Versioning configuration PutBucketVersioning(vpcId, 
s3ServiceId, bucketName string, versioning BucketVersioningRequest) error - GetBucketVersioning(vpcId, s3ServiceId, bucketName string) *BucketVersioningRequest + GetBucketVersioning(vpcId, s3ServiceId, bucketName string) *BucketVersioningResponse // Acl configuration PutBucketAcl(vpcId, s3ServiceId, bucketName string, acl BucketAclRequest) PutBucketAclResponse @@ -258,7 +273,7 @@ type ObjectStorageService interface { DeleteBucketStaticWebsite(vpcId, s3ServiceId, bucketName string) CommonResponse // Lifecycle configuration - GetBucketLifecycle(vpcId, s3ServiceId, bucketName, page, pageSize string) (*BucketLifecycleResponse, error) + GetBucketLifecycle(vpcId, s3ServiceId, bucketName string, page, pageSize int) (*BucketLifecycleResponse, error) PutBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle interface{}) (*BucketLifecycleResponse, error) DeleteBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle interface{}) (*BucketLifecycleResponse, error) } @@ -376,13 +391,13 @@ func (s *ObjectStorageServiceImpl) ListAccessKeys(vpcId, s3ServiceId string) (Ac return AccessKey{}, fmt.Errorf("failed to list access keys: %v", err) } - var accessKeys AccessKey - err = json.Unmarshal(resp, &accessKeys) + var accessKey AccessKey + err = json.Unmarshal(resp, &accessKey) if err != nil { return AccessKey{}, fmt.Errorf("failed to unmarshal access key list response: %v", err) } - return accessKeys, nil + return accessKey, nil } func (s *ObjectStorageServiceImpl) DeleteBucket(vpcId, s3ServiceId, bucketName string) CommonResponse { @@ -445,14 +460,14 @@ func (s *ObjectStorageServiceImpl) UpdateBucketCors(bucketName, vpcId, s3Service return CommonResponse{Status: true}, nil } -func (s *ObjectStorageServiceImpl) GetBucketCors(vpcId, s3ServiceId, bucketName string) (*BucketCors, error) { - apiPath := common.ApiPath.GetBucketCORS(vpcId, s3ServiceId, bucketName) +func (s *ObjectStorageServiceImpl) GetBucketCors(vpcId, s3ServiceId, bucketName string, page, pageSize int) 
(*BucketCorsResponse, error) { + apiPath := common.ApiPath.GetBucketCORS(vpcId, s3ServiceId, bucketName, page, pageSize) resp, err := s.client.SendGetRequest(apiPath) if err != nil { return nil, fmt.Errorf("failed to get bucket CORS: %v", err) } - var cors BucketCors + var cors BucketCorsResponse if err := json.Unmarshal(resp, &cors); err != nil { return nil, fmt.Errorf("failed to unmarshal bucket CORS: %v", err) } @@ -468,16 +483,16 @@ func (s *ObjectStorageServiceImpl) PutBucketVersioning(vpcId, s3ServiceId, bucke return nil } -func (s *ObjectStorageServiceImpl) GetBucketVersioning(vpcId, s3ServiceId, bucketName string) *BucketVersioningRequest { +func (s *ObjectStorageServiceImpl) GetBucketVersioning(vpcId, s3ServiceId, bucketName string) *BucketVersioningResponse { apiPath := common.ApiPath.GetBucketVersioning(vpcId, s3ServiceId, bucketName) resp, err := s.client.SendGetRequest(apiPath) if err != nil { - return &BucketVersioningRequest{} + return &BucketVersioningResponse{Status: false} } - var versioning BucketVersioningRequest + var versioning BucketVersioningResponse if err := json.Unmarshal(resp, &versioning); err != nil { - return &BucketVersioningRequest{} + return &BucketVersioningResponse{Status: false} } return &versioning } @@ -546,7 +561,7 @@ func (s *ObjectStorageServiceImpl) DeleteSubUser(vpcId, s3ServiceId, subUserId s return nil } -func (s *ObjectStorageServiceImpl) GetBucketLifecycle(vpcId, s3ServiceId, bucketName, page, pageSize string) (*BucketLifecycleResponse, error) { +func (s *ObjectStorageServiceImpl) GetBucketLifecycle(vpcId, s3ServiceId, bucketName string, page, pageSize int) (*BucketLifecycleResponse, error) { apiPath := common.ApiPath.GetBucketLifecycle(vpcId, s3ServiceId, bucketName, page, pageSize) resp, err := s.client.SendGetRequest(apiPath) if err != nil { diff --git a/fptcloud/object-storage/datasource_object_storage_access_key.go b/fptcloud/object-storage/datasource_object_storage_access_key.go index 0876ed7..9296b70 100644 --- 
a/fptcloud/object-storage/datasource_object_storage_access_key.go +++ b/fptcloud/object-storage/datasource_object_storage_access_key.go @@ -23,26 +23,18 @@ func DataSourceAccessKey() *schema.Resource { Required: true, Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", }, - "access_keys": { + "credentials": { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "credentials": { - Type: schema.TypeList, + "access_key": { + Type: schema.TypeString, + Computed: true, + }, + "active": { + Type: schema.TypeBool, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "access_key": { - Type: schema.TypeString, - Computed: true, - }, - "active": { - Type: schema.TypeBool, - Computed: true, - }, - }, - }, }, }, }, @@ -58,49 +50,26 @@ func dataSourceAccessKeyRead(ctx context.Context, d *schema.ResourceData, m inte regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) } - _, err := service.ListAccessKeys(vpcId, s3ServiceDetail.S3ServiceId) + keys, err := service.ListAccessKeys(vpcId, s3ServiceDetail.S3ServiceId) if err != nil { return diag.FromErr(err) } - // if len(accessKeys.Credentials) > 0 { - // d.SetId(fmt.Sprintf("access_keys_%d", len(accessKeys))) - // if err := d.Set("access_keys", flattenAccessKeys(accessKeys)); err != nil { - // return diag.FromErr(err) - // } - // } - - return nil -} - -// func flattenAccessKeys(accessKeys AccessKey) []interface{} { -// var result []interface{} -// for _, ak := range accessKeys.Credentials { -// for _, cred := range ak.Credentials { -// credMap := map[string]interface{}{ -// "id": cred.ID, -// "credentials": 
flattenCredentials(cred.Credentials), -// } -// result = append(result, credMap) -// } -// } -// return result -// } - -func flattenCredentials(credentials []struct { - AccessKey string `json:"accessKey"` - Active bool `json:"active"` - CreatedDate interface{} `json:"createdDate"` -}) []interface{} { - var result []interface{} - for _, cred := range credentials { - credMap := map[string]interface{}{ - "access_key": cred.AccessKey, - "active": cred.Active, + var formattedData []interface{} + for _, key := range keys.Credentials { + for _, cred := range key.Credentials { + formattedData = append(formattedData, map[string]interface{}{ + "access_key": cred.AccessKey, + "active": cred.Active, + }) } - result = append(result, credMap) } - return result + if err := d.Set("credentials", formattedData); err != nil { + return diag.FromErr(fmt.Errorf("error setting data: %v", err)) + } + d.SetId(vpcId) + + return nil } diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_cors.go b/fptcloud/object-storage/datasource_object_storage_bucket_cors.go index 6f5460a..8d5f419 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket_cors.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_cors.go @@ -16,21 +16,38 @@ func DataSourceBucketCors() *schema.Resource { "bucket_name": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: "Name of the bucket", }, "vpc_id": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: "The VPC ID", }, - "cors_rule": { - Type: schema.TypeList, + "region_name": { + Type: schema.TypeString, Required: true, - Description: "The bucket cors rule", + ForceNew: true, + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, + "page_size": { + Type: schema.TypeInt, + Optional: true, + Description: "The number of items to return in each page", + }, + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "The page number", + }, + "cors_rule": { + Type: schema.TypeList, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, "allowed_headers": { Type: schema.TypeList, Required: true, @@ -65,6 +82,7 @@ func DataSourceBucketCors() *schema.Resource { }, }, }, + Description: "The bucket cors rule", }, }, } @@ -79,19 +97,38 @@ func dataSourceBucketCorsRead(ctx context.Context, d *schema.ResourceData, m int return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) } bucketName := d.Get("bucket_name").(string) - - corsRule, err := service.GetBucketCors(vpcId, s3ServiceDetail.S3ServiceId, bucketName) + page := 1 + if d.Get("page") != nil { + page = d.Get("page").(int) + } + pageSize := 25 + if d.Get("page_size") != nil { + pageSize = d.Get("page_size").(int) + } + corsRule, err := service.GetBucketCors(vpcId, s3ServiceDetail.S3ServiceId, bucketName, page, pageSize) if err != nil { return diag.FromErr(err) } - if corsRule == nil { - d.SetId("") - return nil + if corsRule.Total == 0 { + return diag.Errorf("bucket %s does not have cors rule", bucketName) + } + var formattedData []interface{} + for _, rule := range corsRule.CorsRules { + formattedData = append(formattedData, map[string]interface{}{ + "id": rule.ID, + "allowed_headers": rule.AllowedHeaders, + "allowed_methods": rule.AllowedMethods, + "allowed_origins": rule.AllowedOrigins, + "expose_headers": rule.ExposeHeaders, + "max_age_seconds": rule.MaxAgeSeconds, + }) } - d.SetId(bucketName) - d.Set("cors_rule", corsRule) + if err := d.Set("cors_rule", formattedData); err != nil { + d.SetId("") + return diag.FromErr(err) + } return nil } diff --git 
a/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go b/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go new file mode 100644 index 0000000..6e5bf20 --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go @@ -0,0 +1,160 @@ +package fptcloud_object_storage + +import ( + "context" + "fmt" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceBucketLifecycle() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceBucketLifecycle, + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + Description: "The VPC ID", + }, + "bucket_name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the bucket to fetch policy for", + }, + "region_name": { + Type: schema.TypeString, + Required: true, + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, + "page_size": { + Type: schema.TypeInt, + Optional: true, + Description: "The number of items to return in each page", + }, + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "The page number", + }, + "life_cycle_rules": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "prefix": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "expiration": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expired_object_delete_marker": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "noncurrent_version_expiration": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "noncurrent_days": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "abort_incomplete_multipart_upload": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "days_after_initiation": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceBucketLifecycle(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + + bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + } + page := 1 + if d.Get("page") != nil { + page = d.Get("page").(int) + } + pageSize := 25 + if d.Get("page_size") != nil { + pageSize = d.Get("page_size").(int) + } + + 
lifeCycleResponse, err := service.GetBucketLifecycle(vpcId, s3ServiceDetail.S3ServiceId, bucketName, page, pageSize) + if err != nil { + return diag.FromErr(err) + } + if !lifeCycleResponse.Status { + return diag.FromErr(fmt.Errorf("failed to fetch life cycle rules for bucket %s", bucketName)) + } + d.SetId(bucketName) + var formattedData []interface{} + if lifeCycleResponse.Total == 0 { + d.Set("life_cycle_rules", formattedData) + } + for _, rule := range lifeCycleResponse.Rules { + formattedData = append(formattedData, map[string]interface{}{ + "id": rule.ID, + "prefix": rule.Prefix, + "status": rule.Status, + "expiration": []interface{}{ + map[string]interface{}{ + "expired_object_delete_marker": rule.Expiration.ExpiredObjectDeleteMarker, + }, + }, + "noncurrent_version_expiration": []interface{}{ + map[string]interface{}{ + "noncurrent_days": rule.NoncurrentVersionExpiration.NoncurrentDays, + }, + }, + "abort_incomplete_multipart_upload": []interface{}{ + map[string]interface{}{ + "days_after_initiation": rule.AbortIncompleteMultipartUpload.DaysAfterInitiation, + }, + }, + }) + } + if err := d.Set("life_cycle_rules", formattedData); err != nil { + d.SetId("") + return diag.FromErr(err) + } + return nil +} diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_policy.go b/fptcloud/object-storage/datasource_object_storage_bucket_policy.go index 6200bdc..b7d158b 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket_policy.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_policy.go @@ -23,6 +23,11 @@ func DataSourceBucketPolicy() *schema.Resource { Required: true, Description: "Name of the bucket to fetch policy for", }, + "region_name": { + Type: schema.TypeString, + Required: true, + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, "policy": { Type: schema.TypeString, Computed: true, @@ -42,17 +47,18 @@ func dataSourceBucketPolicyRead(ctx context.Context, d *schema.ResourceData, m i if s3ServiceDetail.S3ServiceId == "" { return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) } - policyResponse := service.GetBucketPolicy(vpcId, bucketName, s3ServiceDetail.S3ServiceId) + policyResponse := service.GetBucketPolicy(vpcId, s3ServiceDetail.S3ServiceId, bucketName) if !policyResponse.Status { return diag.Errorf("failed to get bucket policy for bucket %s", bucketName) } - // Set the ID to be a combination of bucket name to ensure unique data source - d.SetId(fmt.Sprintf("bucket_policy_%s", bucketName)) - + // Set the policy field in the schema if err := d.Set("policy", policyResponse.Policy); err != nil { + d.SetId("") return diag.FromErr(err) } + // Set the ID to be a combination of bucket name to ensure unique data source + d.SetId(fmt.Sprintf("bucket_policy_%s", bucketName)) return nil } diff --git a/fptcloud/object-storage/datasource_object_storage_lifecycle.go b/fptcloud/object-storage/datasource_object_storage_bucket_static_website.go similarity index 53% rename from fptcloud/object-storage/datasource_object_storage_lifecycle.go rename to fptcloud/object-storage/datasource_object_storage_bucket_static_website.go index 7da8df9..882334a 100644 --- a/fptcloud/object-storage/datasource_object_storage_lifecycle.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_static_website.go @@ -9,9 +9,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func DataSourceBucketLifecycle() *schema.Resource { +func DataSourceBucketStaticWebsite() *schema.Resource { return &schema.Resource{ - ReadContext: dataSourceBucketLifecycle, + ReadContext: dataSourceBucketStaticWebsite, Schema: map[string]*schema.Schema{ "vpc_id": { Type: schema.TypeString, @@ -23,33 +23,24 @@ func 
DataSourceBucketLifecycle() *schema.Resource { Required: true, Description: "Name of the bucket to fetch policy for", }, - "policy": { - Type: schema.TypeString, - Computed: true, - Description: "The bucket policy in JSON format", - }, "region_name": { Type: schema.TypeString, Required: true, Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", }, - "page_size": { - Type: schema.TypeString, - Optional: true, - Default: "25", - Description: "The number of items to return in each page", + "index_document_suffix": { + Type: schema.TypeString, + Optional: true, }, - "page": { - Type: schema.TypeString, - Optional: true, - Default: "1", - Description: "The page number", + "error_document_key": { + Type: schema.TypeString, + Optional: true, }, }, } } -func dataSourceBucketLifecycle(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func dataSourceBucketStaticWebsite(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { client := m.(*common.Client) service := NewObjectStorageService(client) @@ -59,18 +50,22 @@ func dataSourceBucketLifecycle(ctx context.Context, d *schema.ResourceData, m in if s3ServiceDetail.S3ServiceId == "" { return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) } - page := d.Get("page").(string) - pageSize := d.Get("page_size").(string) - lifeCycleResponse, err := service.GetBucketLifecycle(vpcId, s3ServiceDetail.S3ServiceId, bucketName, page, pageSize) - if err != nil { + staticWebsiteResponse := service.GetBucketWebsite(vpcId, s3ServiceDetail.S3ServiceId, bucketName) + if !staticWebsiteResponse.Status { + return diag.Errorf("failed to get bucket static website config for bucket %s", bucketName) + } + if staticWebsiteResponse.Config.IndexDocument.Suffix == "" && staticWebsiteResponse.Config.ErrorDocument.Key == "" { + return diag.Errorf("bucket %s does not have static website 
configuration", bucketName) + } + if err := d.Set("index_document_suffix", staticWebsiteResponse.Config.IndexDocument.Suffix); err != nil { + d.SetId("") return diag.FromErr(err) } - - d.SetId(fmt.Sprintf("%s-%s", vpcId, bucketName)) - if err := d.Set("policy", lifeCycleResponse.Rules); err != nil { + if err := d.Set("error_document_key", staticWebsiteResponse.Config.ErrorDocument.Key); err != nil { + d.SetId("") return diag.FromErr(err) } - + d.SetId(bucketName) return nil } diff --git a/fptcloud/object-storage/datasource_object_storage_versioning.go b/fptcloud/object-storage/datasource_object_storage_bucket_versioning.go similarity index 84% rename from fptcloud/object-storage/datasource_object_storage_versioning.go rename to fptcloud/object-storage/datasource_object_storage_bucket_versioning.go index 3482ef5..e9eb8f7 100644 --- a/fptcloud/object-storage/datasource_object_storage_versioning.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_versioning.go @@ -26,7 +26,7 @@ func DataSourceBucketVersioning() *schema.Resource { }, "versioning_status": { Type: schema.TypeString, - Required: true, + Optional: true, Description: "Status of the versioning, must be Enabled or Suspended", ForceNew: true, // Marking this field as ForceNew to ensure that the resource is recreated when the value is changed }, @@ -51,13 +51,16 @@ func dataSourceBucketVersioningRead(ctx context.Context, d *schema.ResourceData, } bucketName := d.Get("bucket_name").(string) - versioning := service.GetBucketVersioning(vpcId, bucketName, s3ServiceDetail.S3ServiceId) - if versioning.Status != "" { - return diag.Errorf("failed to get bucket versioning for bucket %s", bucketName) + versioning := service.GetBucketVersioning(vpcId, s3ServiceDetail.S3ServiceId, bucketName) + if !versioning.Status { + return diag.Errorf("Could not get versioning status for bucket %s", bucketName) } + if err := d.Set("versioning_status", versioning.Config); err != nil { + d.SetId("") + return 
diag.FromErr(err) + } d.SetId(bucketName) - d.Set("versioning_status", versioning.Status) return nil } diff --git a/fptcloud/object-storage/datasource_object_storage_region.go b/fptcloud/object-storage/datasource_object_storage_region.go index 8324511..60e12d6 100644 --- a/fptcloud/object-storage/datasource_object_storage_region.go +++ b/fptcloud/object-storage/datasource_object_storage_region.go @@ -3,9 +3,7 @@ package fptcloud_object_storage import ( "context" "fmt" - "log" common "terraform-provider-fptcloud/commons" - "time" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -71,9 +69,5 @@ func resourceS3ServiceEnableResponseRead(_ context.Context, d *schema.ResourceDa return diag.FromErr(fmt.Errorf("error setting data: %v", err)) } d.SetId(vpcId) - - log.Println("-----------------") - log.Printf("\n [DEBUG] %v Set Data Result is: %v", time.Now(), d.Get("s3_enable_services")) - log.Println("-----------------") return nil } diff --git a/fptcloud/object-storage/datasource_object_storage_static_website.go b/fptcloud/object-storage/datasource_object_storage_static_website.go deleted file mode 100644 index ece980a..0000000 --- a/fptcloud/object-storage/datasource_object_storage_static_website.go +++ /dev/null @@ -1,144 +0,0 @@ -package fptcloud_object_storage - -import ( - "context" - "fmt" - common "terraform-provider-fptcloud/commons" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceBucketStaticWebsite() *schema.Resource { - return &schema.Resource{ - ReadContext: dataSourceBucketStaticWebsite, - Schema: map[string]*schema.Schema{ - "vpc_id": { - Type: schema.TypeString, - Required: true, - Description: "The VPC ID", - }, - "bucket_name": { - Type: schema.TypeString, - Required: true, - Description: "Name of the bucket to fetch policy for", - }, - "region_name": { - Type: schema.TypeString, - Required: true, - 
Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", - }, - "status": { - Type: schema.TypeBool, - Computed: true, - Description: "Status of the bucket website configuration", - }, - "request_id": { - Type: schema.TypeString, - Computed: true, - Description: "Request ID of the operation", - }, - "host_id": { - Type: schema.TypeString, - Computed: true, - Description: "Host ID of the operation", - }, - "http_status_code": { - Type: schema.TypeInt, - Computed: true, - Description: "HTTP status code of the operation", - }, - "http_headers": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "HTTP headers of the response", - }, - "retry_attempts": { - Type: schema.TypeInt, - Computed: true, - Description: "Number of retry attempts", - }, - "index_document": { - Type: schema.TypeString, - Computed: true, - Description: "Suffix for index document", - ForceNew: true, - }, - "error_document": { - Type: schema.TypeString, - Computed: true, - Description: "Key for error document", - ForceNew: true, - }, - }, - } -} - -func dataSourceBucketStaticWebsite(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - client := m.(*common.Client) - service := NewObjectStorageService(client) - - bucketName := d.Get("bucket_name").(string) - vpcId := d.Get("vpc_id").(string) - s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) - if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) - } - - staticWebsiteResponse := service.GetBucketWebsite(vpcId, bucketName, s3ServiceDetail.S3ServiceId) - if !staticWebsiteResponse.Status { - return diag.Errorf("failed to get bucket static website config for bucket %s", bucketName) - } - - // Set the computed values - if err := d.Set("status", 
staticWebsiteResponse.Status); err != nil { - return diag.FromErr(err) - } - - if staticWebsiteResponse.Config.ResponseMetadata.RequestID != "" { - if err := d.Set("request_id", staticWebsiteResponse.Config.ResponseMetadata.RequestID); err != nil { - return diag.FromErr(err) - } - } - - if staticWebsiteResponse.Config.ResponseMetadata.HostID != "" { - if err := d.Set("host_id", staticWebsiteResponse.Config.ResponseMetadata.HostID); err != nil { - return diag.FromErr(err) - } - } - - if err := d.Set("http_status_code", staticWebsiteResponse.Config.ResponseMetadata.HTTPStatusCode); err != nil { - return diag.FromErr(err) - } - - headers := map[string]string{ - "x-amz-request-id": staticWebsiteResponse.Config.ResponseMetadata.HTTPHeaders.XAmzRequestID, - "content-type": staticWebsiteResponse.Config.ResponseMetadata.HTTPHeaders.ContentType, - "content-length": staticWebsiteResponse.Config.ResponseMetadata.HTTPHeaders.ContentLength, - "date": staticWebsiteResponse.Config.ResponseMetadata.HTTPHeaders.Date, - } - if err := d.Set("http_headers", headers); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("retry_attempts", staticWebsiteResponse.Config.ResponseMetadata.RetryAttempts); err != nil { - return diag.FromErr(err) - } - - if staticWebsiteResponse.Config.IndexDocument.Suffix != "" { - if err := d.Set("index_document", staticWebsiteResponse.Config.IndexDocument.Suffix); err != nil { - return diag.FromErr(err) - } - } - - if staticWebsiteResponse.Config.ErrorDocument.Key != "" { - if err := d.Set("error_document", staticWebsiteResponse.Config.ErrorDocument.Key); err != nil { - return diag.FromErr(err) - } - } - - return nil -} diff --git a/fptcloud/provider.go b/fptcloud/provider.go index 736f118..a30fb20 100644 --- a/fptcloud/provider.go +++ b/fptcloud/provider.go @@ -84,7 +84,8 @@ func Provider() *schema.Provider { "fptcloud_object_storage_lifecycle": fptcloud_object_storage.DataSourceBucketLifecycle(), "fptcloud_object_storage_static_website": 
fptcloud_object_storage.DataSourceBucketStaticWebsite(), "fptcloud_object_storage_sub_user_detail": fptcloud_object_storage.DataSourceSubUserDetail(), - "fptcloud_s3_service_enable": fptcloud_object_storage.DataSourceS3ServiceEnableResponse(), + "fptcloud_s3_service_enable": fptcloud_object_storage.DataSourceS3ServiceEnableResponse(), + "fptcloud_object_storage_bucket_acl": fptcloud_object_storage.DataSourceBucketAcl(), }, ResourcesMap: map[string]*schema.Resource{ "fptcloud_storage": fptcloud_storage.ResourceStorage(),