From a997f3a1ad09c599692c549d74313ca1002c84d9 Mon Sep 17 00:00:00 2001
From: hoanglm
Date: Fri, 15 Nov 2024 02:31:55 +0700
Subject: [PATCH] [Object Storage] fix: bucket lifecycle configuration cannot be created

---
 commons/api_path.go                            |   2 +-
 .../resource_bucket_policy.tf                  |  26 +++
 .../datasource_object_storage.go               |  58 ++++--
 ...asource_object_storage_bucket_lifecycle.go  |  55 ++++--
 .../resource_bucket_lifecycle.go               | 170 ++++++++++++++++++
 .../object-storage/resource_bucket_policy.go   |  80 ++++++---
 fptcloud/provider.go                           |   1 +
 7 files changed, 337 insertions(+), 55 deletions(-)
 create mode 100644 examples/resources/fptcloud_object_storage/resource_bucket_policy.tf
 create mode 100644 fptcloud/object-storage/resource_bucket_lifecycle.go

diff --git a/commons/api_path.go b/commons/api_path.go
index 3e643f9..8938a27 100644
--- a/commons/api_path.go
+++ b/commons/api_path.go
@@ -312,7 +312,7 @@ var ApiPath = struct {
 		return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/lifecycles?page=%d&page_size=%d", vpcId, s3ServiceId, bucketName, page, pageSize)
 	},
 	PutBucketLifecycle: func(vpcId, s3ServiceId, bucketName string) string {
-		return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/create-bucket-lifecycle-configuration`", vpcId, s3ServiceId, bucketName)
+		return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/create-bucket-lifecycle-configuration", vpcId, s3ServiceId, bucketName)
 	},
 	DeleteBucketLifecycle: func(vpcId, s3ServiceId, bucketName string) string {
 		return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/delete-bucket-lifecycle-configuration", vpcId, s3ServiceId, bucketName)
 	},
diff --git a/examples/resources/fptcloud_object_storage/resource_bucket_policy.tf b/examples/resources/fptcloud_object_storage/resource_bucket_policy.tf
new file mode 100644
index 0000000..667aec5
--- /dev/null
+++ b/examples/resources/fptcloud_object_storage/resource_bucket_policy.tf
@@ -0,0 +1,26 @@
+resource "fptcloud_object_storage_bucket_policy" "example_bucket_policy" {
+  vpc_id      = "your_vpc_id"
+  region_name = "your_region_name"
+  bucket_name = "your_bucket_name"
+
+  // Option 1: Load policy from file
+  policy_file = file("${path.module}/your_bucket_policy_json_content.json")
+
+  // Option 2: Inline policy
+  // policy = jsonencode({
+  //   Version = "2012-10-17"
+  //   Statement = [
+  //     {
+  //       Sid       = "PublicReadGetObject"
+  //       Effect    = "Allow"
+  //       Principal = "*"
+  //       Action    = "s3:GetObject"
+  //       Resource  = "arn:aws:s3:::example-bucket/*"
+  //     }
+  //   ]
+  // })
+}
+// NOTE: To delete the bucket policy, omit both the policy and policy_file fields
+output "bucket_policy" {
+  value = fptcloud_object_storage_bucket_policy.example_bucket_policy.status
+}
diff --git a/fptcloud/object-storage/datasource_object_storage.go b/fptcloud/object-storage/datasource_object_storage.go
index e29c600..e380136 100644
--- a/fptcloud/object-storage/datasource_object_storage.go
+++ b/fptcloud/object-storage/datasource_object_storage.go
@@ -201,7 +201,8 @@ type BucketLifecycleResponse struct {
 	Status bool `json:"status"`
 	Rules []struct {
 		Expiration struct {
-			ExpiredObjectDeleteMarker bool `json:"ExpiredObjectDeleteMarker"`
+			ExpiredObjectDeleteMarker bool `json:"ExpiredObjectDeleteMarker,omitempty"`
+			Days int `json:"Days,omitempty"`
 		} `json:"Expiration"`
 		ID string `json:"ID"`
 		Filter struct {
@@ -228,6 +229,31 @@ type DetailSubUser struct {
 	AccessKeys []string `json:"access_keys"`
 }
 
+type S3BucketLifecycleConfig struct {
+	ID string `json:"ID"`
+	Filter Filter `json:"Filter"`
+	Expiration Expiration `json:"Expiration"`
+
NoncurrentVersionExpiration NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration"` + AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload"` +} + +type Filter struct { + Prefix string `json:"Prefix"` +} + +type Expiration struct { + Days int `json:"Days,omitempty"` + ExpiredObjectDeleteMarker bool `json:"ExpiredObjectDeleteMarker,omitempty"` +} + +type NoncurrentVersionExpiration struct { + NoncurrentDays int `json:"NoncurrentDays"` +} + +type AbortIncompleteMultipartUpload struct { + DaysAfterInitiation int `json:"DaysAfterInitiation"` +} + // ObjectStorageService defines the interface for object storage operations type ObjectStorageService interface { CheckServiceEnable(vpcId string) S3ServiceEnableResponse @@ -251,7 +277,7 @@ type ObjectStorageService interface { DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId, accessKeyId string) CommonResponse // bucket configuration - PutBucketPolicy(vpcId, s3ServiceId, bucketName string, policy BucketPolicyRequest) CommonResponse + PutBucketPolicy(vpcId, s3ServiceId, bucketName string, policy interface{}) CommonResponse GetBucketPolicy(vpcId, s3ServiceId, bucketName string) *BucketPolicyResponse // CORS configuration @@ -274,8 +300,8 @@ type ObjectStorageService interface { // Lifecycle configuration GetBucketLifecycle(vpcId, s3ServiceId, bucketName string, page, pageSize int) (*BucketLifecycleResponse, error) - PutBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle interface{}) (*BucketLifecycleResponse, error) - DeleteBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle interface{}) (*BucketLifecycleResponse, error) + PutBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle map[string]interface{}) CommonResponse + DeleteBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle map[string]interface{}) CommonResponse } // ObjectStorageServiceImpl is the implementation of ObjectStorageService @@ -421,7 +447,7 @@ func (s *ObjectStorageServiceImpl) DeleteAccessKey(vpcId, s3ServiceId, accessKey } // Implement bucket policy methods -func (s *ObjectStorageServiceImpl) PutBucketPolicy(vpcId, s3ServiceId, bucketName string, policy BucketPolicyRequest) CommonResponse { +func (s *ObjectStorageServiceImpl) PutBucketPolicy(vpcId, s3ServiceId, bucketName string, policy interface{}) CommonResponse { apiPath := common.ApiPath.PutBucketPolicy(vpcId, s3ServiceId, bucketName) if _, err := s.client.SendPutRequest(apiPath, policy); err != nil { return CommonResponse{Status: false} @@ -575,32 +601,32 @@ func (s *ObjectStorageServiceImpl) GetBucketLifecycle(vpcId, s3ServiceId, bucket return &bucketLifecycle, nil } -func (s *ObjectStorageServiceImpl) PutBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle interface{}) (*BucketLifecycleResponse, error) { +func (s *ObjectStorageServiceImpl) PutBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle map[string]interface{}) CommonResponse { apiPath := common.ApiPath.PutBucketLifecycle(vpcId, s3ServiceId, bucketName) - resp, err := s.client.SendPutRequest(apiPath, lifecycle) + resp, err := s.client.SendPostRequest(apiPath, lifecycle) if err != nil { - return nil, fmt.Errorf("failed to put bucket lifecycle: %v", err) + return CommonResponse{Status: false, Message: err.Error()} } - var bucketLifecycle BucketLifecycleResponse + var bucketLifecycle CommonResponse if err := json.Unmarshal(resp, &bucketLifecycle); err != nil { - return nil, fmt.Errorf("failed to unmarshal bucket lifecycle: %v", err) + return 
CommonResponse{Status: false, Message: err.Error()} } - return &bucketLifecycle, nil + return bucketLifecycle } -func (s *ObjectStorageServiceImpl) DeleteBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle interface{}) (*BucketLifecycleResponse, error) { +func (s *ObjectStorageServiceImpl) DeleteBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle map[string]interface{}) CommonResponse { apiPath := common.ApiPath.DeleteBucketLifecycle(vpcId, s3ServiceId, bucketName) resp, err := s.client.SendPutRequest(apiPath, lifecycle) if err != nil { - return nil, fmt.Errorf("failed to delete bucket lifecycle: %v", err) + return CommonResponse{Status: false, Message: err.Error()} } - var bucketLifecycle BucketLifecycleResponse + var bucketLifecycle CommonResponse if err := json.Unmarshal(resp, &bucketLifecycle); err != nil { - return nil, fmt.Errorf("failed to unmarshal bucket lifecycle: %v", err) + return CommonResponse{Status: false, Message: err.Error()} } - return &bucketLifecycle, nil + return bucketLifecycle } func (s *ObjectStorageServiceImpl) CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId string) *SubUserCreateKeyResponse { diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go b/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go index 6e5bf20..1866d0c 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go @@ -11,7 +11,7 @@ import ( func DataSourceBucketLifecycle() *schema.Resource { return &schema.Resource{ - ReadContext: dataSourceBucketLifecycle, + ReadContext: dataSourceBucketLifecycleRead, Schema: map[string]*schema.Schema{ "vpc_id": { Type: schema.TypeString, @@ -47,9 +47,17 @@ func DataSourceBucketLifecycle() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "prefix": { - Type: schema.TypeString, + "filter": { + Type: schema.TypeList, Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prefix": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, }, "status": { Type: schema.TypeString, @@ -98,7 +106,7 @@ func DataSourceBucketLifecycle() *schema.Resource { } } -func dataSourceBucketLifecycle(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func dataSourceBucketLifecycleRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { client := m.(*common.Client) service := NewObjectStorageService(client) @@ -130,28 +138,43 @@ func dataSourceBucketLifecycle(ctx context.Context, d *schema.ResourceData, m in if lifeCycleResponse.Total == 0 { d.Set("life_cycle_rules", formattedData) } - for _, rule := range lifeCycleResponse.Rules { - formattedData = append(formattedData, map[string]interface{}{ - "id": rule.ID, - "prefix": rule.Prefix, - "status": rule.Status, - "expiration": []interface{}{ + for _, lifecycleRule := range lifeCycleResponse.Rules { + data := map[string]interface{}{ + "id": lifecycleRule.ID, + "status": lifecycleRule.Status, + "noncurrent_version_expiration": []interface{}{ map[string]interface{}{ - "expired_object_delete_marker": rule.Expiration.ExpiredObjectDeleteMarker, + "noncurrent_days": lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays, }, }, - "noncurrent_version_expiration": []interface{}{ + "abort_incomplete_multipart_upload": []interface{}{ map[string]interface{}{ - "noncurrent_days": rule.NoncurrentVersionExpiration.NoncurrentDays, + "days_after_initiation": 
lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation,
 				},
 			},
-			"abort_incomplete_multipart_upload": []interface{}{
+			"filter": []interface{}{
 				map[string]interface{}{
-					"days_after_initiation": rule.AbortIncompleteMultipartUpload.DaysAfterInitiation,
+					"prefix": lifecycleRule.Filter.Prefix,
 				},
 			},
-		})
+		}
+		if lifecycleRule.Expiration.Days != 0 {
+			data["expiration"] = []interface{}{
+				map[string]interface{}{
+					"days": lifecycleRule.Expiration.Days,
+				},
+			}
+		}
+		if lifecycleRule.Expiration.ExpiredObjectDeleteMarker {
+			data["expiration"] = []interface{}{
+				map[string]interface{}{
+					"expired_object_delete_marker": lifecycleRule.Expiration.ExpiredObjectDeleteMarker,
+				},
+			}
+		}
+		formattedData = append(formattedData, data)
 	}
 	if err := d.Set("life_cycle_rules", formattedData); err != nil {
 		d.SetId("")
 		return diag.FromErr(err)
diff --git a/fptcloud/object-storage/resource_bucket_lifecycle.go b/fptcloud/object-storage/resource_bucket_lifecycle.go
new file mode 100644
index 0000000..48d1c4c
--- /dev/null
+++ b/fptcloud/object-storage/resource_bucket_lifecycle.go
@@ -0,0 +1,170 @@
+package fptcloud_object_storage
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	common "terraform-provider-fptcloud/commons"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+)
+
+func ResourceBucketLifeCycle() *schema.Resource {
+	return &schema.Resource{
+		CreateContext: resourceBucketLifeCycleCreate,
+		UpdateContext: nil,
+		DeleteContext: resourceBucketLifeCycleDelete,
+		ReadContext: dataSourceBucketLifecycleRead,
+		Schema: map[string]*schema.Schema{
+			"vpc_id": {
+				Type: schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				Description: "The VPC ID",
+			},
+			"bucket_name": {
+				Type: schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				Description: "Name of the bucket",
+			},
+			"region_name": {
+				Type: schema.TypeString,
+				Required: true,
+				ForceNew: true,
+				Description: "The region name, which must match the region name used by the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, + "life_cycle_rule": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The bucket lifecycle rule in JSON format, support only one rule", + ConflictsWith: []string{"life_cycle_rule_file"}, + ValidateFunc: validation.StringIsJSON, + }, + "life_cycle_rule_file": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Path to the JSON file containing the bucket lifecycle rule, support only one rule", + ConflictsWith: []string{"life_cycle_rule"}, + }, + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "Status after bucket lifecycle rule is created", + }, + }, + } +} + +func resourceBucketLifeCycleCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + bucketName := d.Get("bucket_name").(string) + regionName := d.Get("region_name").(string) + vpcId := d.Get("vpc_id").(string) + + var lifecycleRuleContent string + if v, ok := d.GetOk("life_cycle_rule"); ok { + lifecycleRuleContent = v.(string) + } else if v, ok := d.GetOk("life_cycle_rule_file"); ok { + // The actual file reading is handled by Terraform's built-in file() function + // in the configuration, so we just get the content here + lifecycleRuleContent = v.(string) + } else { + return diag.FromErr(fmt.Errorf("either 'life_cycle_rule' or 'life_cycle_rule_file' must be specified")) + } + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + } + var jsonMap S3BucketLifecycleConfig + err := json.Unmarshal([]byte(lifecycleRuleContent), &jsonMap) + if err != nil { + return diag.FromErr(err) + } + payload := map[string]interface{}{ + "ID": jsonMap.ID, + "Filter": map[string]interface{}{"Prefix": jsonMap.Filter.Prefix}, + "NoncurrentVersionExpiration": map[string]interface{}{"NoncurrentDays": jsonMap.NoncurrentVersionExpiration.NoncurrentDays}, + "AbortIncompleteMultipartUpload": map[string]interface{}{"DaysAfterInitiation": jsonMap.AbortIncompleteMultipartUpload.DaysAfterInitiation}, + } + if jsonMap.Expiration.Days != 0 && jsonMap.Expiration.ExpiredObjectDeleteMarker { + return diag.FromErr(fmt.Errorf("Expiration.Days and Expiration.ExpiredObjectDeleteMarker cannot be set at the same time")) + } + if jsonMap.Expiration.Days != 0 { + payload["Expiration"] = map[string]interface{}{"Days": jsonMap.Expiration.Days} + } + if jsonMap.Expiration.ExpiredObjectDeleteMarker { + payload["Expiration"] = map[string]interface{}{"ExpiredObjectDeleteMarker": jsonMap.Expiration.ExpiredObjectDeleteMarker} + } + r := service.PutBucketLifecycle(vpcId, s3ServiceDetail.S3ServiceId, bucketName, payload) + if !r.Status { + d.Set("status", false) + return diag.FromErr(fmt.Errorf("%s", r.Message)) + } + d.SetId(bucketName) + if err := d.Set("status", true); err != nil { + return diag.FromErr(err) + } + + return nil +} + +func resourceBucketLifeCycleDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", 
regionName))
+	}
+	var lifecycleRuleContent string
+	if v, ok := d.GetOk("life_cycle_rule"); ok {
+		lifecycleRuleContent = v.(string)
+	} else if v, ok := d.GetOk("life_cycle_rule_file"); ok {
+		// The actual file reading is handled by Terraform's built-in file() function
+		// in the configuration, so we just get the content here
+		lifecycleRuleContent = v.(string)
+	} else {
+		return diag.FromErr(fmt.Errorf("either 'life_cycle_rule' or 'life_cycle_rule_file' must be specified"))
+	}
+	var jsonMap S3BucketLifecycleConfig
+	err := json.Unmarshal([]byte(lifecycleRuleContent), &jsonMap)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	payload := map[string]interface{}{
+		"ID": jsonMap.ID,
+		"Filter": map[string]interface{}{"Prefix": jsonMap.Filter.Prefix},
+		"NoncurrentVersionExpiration": map[string]interface{}{"NoncurrentDays": jsonMap.NoncurrentVersionExpiration.NoncurrentDays},
+		"AbortIncompleteMultipartUpload": map[string]interface{}{"DaysAfterInitiation": jsonMap.AbortIncompleteMultipartUpload.DaysAfterInitiation},
+		"OrgID": jsonMap.ID,
+		"Status": "Enabled",
+	}
+	if jsonMap.Expiration.Days != 0 && jsonMap.Expiration.ExpiredObjectDeleteMarker {
+		return diag.FromErr(fmt.Errorf("Expiration.Days and Expiration.ExpiredObjectDeleteMarker cannot be set at the same time"))
+	}
+	if jsonMap.Expiration.Days != 0 {
+		payload["Expiration"] = map[string]interface{}{"Days": jsonMap.Expiration.Days}
+	}
+	if jsonMap.Expiration.ExpiredObjectDeleteMarker {
+		payload["Expiration"] = map[string]interface{}{"ExpiredObjectDeleteMarker": jsonMap.Expiration.ExpiredObjectDeleteMarker}
+	}
+	r := service.DeleteBucketLifecycle(vpcId, s3ServiceDetail.S3ServiceId, bucketName, payload)
+	if !r.Status {
+		d.Set("status", false)
+		return diag.FromErr(fmt.Errorf("%s", r.Message))
+	}
+	d.SetId(bucketName)
+	if err := d.Set("status", true); err != nil {
+		return diag.FromErr(err)
+	}
+	return nil
+}
diff --git a/fptcloud/object-storage/resource_bucket_policy.go b/fptcloud/object-storage/resource_bucket_policy.go
index 8a86664..b1d257d 100644
--- a/fptcloud/object-storage/resource_bucket_policy.go
+++ b/fptcloud/object-storage/resource_bucket_policy.go
@@ -2,30 +2,27 @@ package fptcloud_object_storage
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	common "terraform-provider-fptcloud/commons"
 
 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 )
 
 func ResourceBucketPolicy() *schema.Resource {
 	return &schema.Resource{
 		CreateContext: resourceBucketPolicyCreate,
-		UpdateContext: resourceBucketPolicyUpdate,
+		UpdateContext: nil,
 		DeleteContext: resourceBucketPolicyDelete,
 		ReadContext: dataSourceBucketPolicyRead,
 		Schema: map[string]*schema.Schema{
-			"bucket_name": {
+			"vpc_id": {
 				Type: schema.TypeString,
 				Required: true,
 				ForceNew: true,
-				Description: "Name of the bucket",
-			},
-			"policy": {
-				Type: schema.TypeString,
-				Required: true,
-				Description: "The bucket policy in JSON format",
+				Description: "The VPC ID",
 			},
 			"region_name": {
 				Type: schema.TypeString,
@@ -33,11 +30,31 @@ func ResourceBucketPolicy() *schema.Resource {
 				ForceNew: true,
 				Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02",
 			},
-			"vpc_id": {
+			"bucket_name": {
 				Type: schema.TypeString,
 				Required: true,
 				ForceNew: true,
-				Description: "The VPC ID",
+				Description: "Name of the bucket",
+			},
+			"policy": {
+				Type: schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Description: "The bucket policy in JSON format",
+				ConflictsWith: []string{"policy_file"},
+				ValidateFunc: validation.StringIsJSON,
+			},
+			"policy_file": {
+				Type: schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+				Description: "Path to the JSON file containing the bucket policy",
+				ConflictsWith: []string{"policy"},
+			},
+			"status": {
+				Type: schema.TypeBool,
+				Computed: true,
+				Description: "Status after bucket policy is created",
 			},
 		},
 	}
@@ -48,28 +65,47 @@ func resourceBucketPolicyCreate(ctx context.Context, d *schema.ResourceData, m i
 	service := NewObjectStorageService(client)
 
 	bucketName := d.Get("bucket_name").(string)
-	policy := d.Get("policy").(string)
 	vpcId := d.Get("vpc_id").(string)
 	regionName := d.Get("region_name").(string)
+
+	// Get policy content either from policy or policy_file
+	var policyContent string
+	if v, ok := d.GetOk("policy"); ok {
+		policyContent = v.(string)
+	} else if v, ok := d.GetOk("policy_file"); ok {
+		// The actual file reading is handled by Terraform's built-in file() function
+		// in the configuration, so we just get the content here
+		policyContent = v.(string)
+	} else {
+		return diag.FromErr(fmt.Errorf("either 'policy' or 'policy_file' must be specified"))
+	}
+
 	s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName)
 	if s3ServiceDetail.S3ServiceId == "" {
-		return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string)))
+		return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName))
 	}
-
-	resp := service.PutBucketPolicy(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketPolicyRequest{
-		Policy: policy,
-	})
+	var jsonMap map[string]interface{}
+	err := json.Unmarshal([]byte(policyContent), &jsonMap)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	// Wrap the decoded policy object in the request body format the API expects
+	payload := map[string]interface{}{
+		"policy": jsonMap,
+	}
+	resp := service.PutBucketPolicy(vpcId, s3ServiceDetail.S3ServiceId, bucketName, payload)
 	if !resp.Status {
-		return diag.Errorf("failed to create bucket policy for bucket %s", bucketName)
+		d.Set("status", false)
+		return diag.Errorf("failed to create bucket policy: %s", resp.Message)
 	}
-
 	d.SetId(bucketName)
-	return nil
-}
+	if err := d.Set("status", true); err != nil {
+		d.SetId("")
+		return diag.FromErr(err)
+	}
 
-func resourceBucketPolicyUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
-	return resourceBucketPolicyCreate(ctx, d, m)
+	return nil
 }
 
 func resourceBucketPolicyDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
diff --git a/fptcloud/provider.go b/fptcloud/provider.go
index a30fb20..bfe1c44 100644
--- a/fptcloud/provider.go
+++ b/fptcloud/provider.go
@@ -106,6 +106,7 @@ func Provider() *schema.Provider {
 			"fptcloud_object_storage_bucket_static_website": fptcloud_object_storage.ResourceBucketStaticWebsite(),
 			"fptcloud_object_storage_bucket_acl": fptcloud_object_storage.ResourceBucketAcl(),
 			"fptcloud_object_storage_sub_user_key": fptcloud_object_storage.ResourceSubUserKeys(),
+			"fptcloud_object_storage_bucket_lifecycle": fptcloud_object_storage.ResourceBucketLifeCycle(),
 		},
 		ConfigureContextFunc: providerConfigureContext,
 	}
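
For reference, a minimal usage sketch for the new fptcloud_object_storage_bucket_lifecycle resource, in the same style as the resource_bucket_policy.tf example added by this patch. The attribute names (vpc_id, region_name, bucket_name, life_cycle_rule, life_cycle_rule_file, status) and the rule keys (ID, Filter.Prefix, Expiration, NoncurrentVersionExpiration, AbortIncompleteMultipartUpload) come from the schema and the S3BucketLifecycleConfig struct in this patch; the bucket, prefix and day values are placeholders, and only one of Expiration.Days or Expiration.ExpiredObjectDeleteMarker may be set, per the validation in resourceBucketLifeCycleCreate.

resource "fptcloud_object_storage_bucket_lifecycle" "example_bucket_lifecycle" {
  vpc_id      = "your_vpc_id"
  region_name = "your_region_name"
  bucket_name = "your_bucket_name"

  // Option 1: Load the single lifecycle rule from a JSON file
  // life_cycle_rule_file = file("${path.module}/your_lifecycle_rule.json")

  // Option 2: Inline rule (only one rule is supported)
  life_cycle_rule = jsonencode({
    ID     = "expire-old-logs"
    Filter = { Prefix = "logs/" }
    Expiration = {
      Days = 30
    }
    NoncurrentVersionExpiration = {
      NoncurrentDays = 7
    }
    AbortIncompleteMultipartUpload = {
      DaysAfterInitiation = 3
    }
  })
}

output "bucket_lifecycle_status" {
  value = fptcloud_object_storage_bucket_lifecycle.example_bucket_lifecycle.status
}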