From 79c0bbd0db3463543eb95e5a633c32ef7134fc1b Mon Sep 17 00:00:00 2001 From: hoanglm Date: Tue, 22 Oct 2024 15:59:25 +0700 Subject: [PATCH 1/8] [Object Storage] feat: implement Object Storage service [Object Storage] update: fix bug bucket config --- commons/api_path.go | 150 +++++ commons/client.go | 3 + .../datasource_object_storage.go | 609 ++++++++++++++++++ .../datasource_object_storage_access_key.go | 102 +++ .../datasource_object_storage_bucket.go | 71 ++ .../datasource_object_storage_bucket_cors.go | 93 +++ ...datasource_object_storage_bucket_policy.go | 55 ++ .../datasource_object_storage_lifecycle.go | 75 +++ ...atasource_object_storage_static_website.go | 144 +++++ .../datasource_object_storage_sub_user.go | 57 ++ ...tasource_object_storage_sub_user_detail.go | 93 +++ .../datasource_object_storage_versioning.go | 52 ++ .../object-storage/resource_access_key.go | 115 ++++ fptcloud/object-storage/resource_bucket.go | 131 ++++ .../object-storage/resource_bucket_cors.go | 146 +++++ .../object-storage/resource_bucket_policy.go | 91 +++ .../resource_bucket_static_website.go | 108 ++++ .../resource_bucket_versioning.go | 74 +++ fptcloud/object-storage/resource_sub_user.go | 112 ++++ fptcloud/provider.go | 93 +-- main.go | 7 +- 21 files changed, 2342 insertions(+), 39 deletions(-) create mode 100644 fptcloud/object-storage/datasource_object_storage.go create mode 100644 fptcloud/object-storage/datasource_object_storage_access_key.go create mode 100644 fptcloud/object-storage/datasource_object_storage_bucket.go create mode 100644 fptcloud/object-storage/datasource_object_storage_bucket_cors.go create mode 100644 fptcloud/object-storage/datasource_object_storage_bucket_policy.go create mode 100644 fptcloud/object-storage/datasource_object_storage_lifecycle.go create mode 100644 fptcloud/object-storage/datasource_object_storage_static_website.go create mode 100644 fptcloud/object-storage/datasource_object_storage_sub_user.go create mode 100644 
fptcloud/object-storage/datasource_object_storage_sub_user_detail.go create mode 100644 fptcloud/object-storage/datasource_object_storage_versioning.go create mode 100644 fptcloud/object-storage/resource_access_key.go create mode 100644 fptcloud/object-storage/resource_bucket.go create mode 100644 fptcloud/object-storage/resource_bucket_cors.go create mode 100644 fptcloud/object-storage/resource_bucket_policy.go create mode 100644 fptcloud/object-storage/resource_bucket_static_website.go create mode 100644 fptcloud/object-storage/resource_bucket_versioning.go create mode 100644 fptcloud/object-storage/resource_sub_user.go diff --git a/commons/api_path.go b/commons/api_path.go index 9a496b7..7d66de6 100644 --- a/commons/api_path.go +++ b/commons/api_path.go @@ -2,6 +2,8 @@ package commons import "fmt" +const ObjectStorageApiPrefix = "/v1/vmware/vpc" + var ApiPath = struct { SSH string Storage func(vpcId string) string @@ -58,6 +60,49 @@ var ApiPath = struct { ManagedFKEDelete func(vpcId string, platform string, clusterName string) string ManagedFKECreate func(vpcId string, platform string) string GetFKEOSVersion func(vpcId string, platform string) string + + // Object Storage + // Common + CheckS3ServiceEnable func(vpcId string) string + + // Bucket + ListBuckets func(vpcId, s3ServiceId string, page, pageSize int) string + CreateBucket func(vpcId, s3ServiceId string) string + DeleteBucket func(vpcId, s3ServiceId, bucketName string) string + // Bucket Policy + GetBucketPolicy func(vpcId, s3ServiceId, bucketName string) string + PutBucketPolicy func(vpcId, s3ServiceId, bucketName string) string + // Bucket Static Website + GetBucketWebsite func(vpcId, s3ServiceId, bucketName string) string + PutBucketWebsite func(vpcId, s3ServiceId, bucketName string) string + DeleteBucketStaticWebsite func(vpcId, s3ServiceId, bucketName string) string + // Bucket Versioning + GetBucketVersioning func(vpcId, s3ServiceId, bucketName string) string + PutBucketVersioning func(vpcId, 
s3ServiceId, bucketName string) string + // Bucket Lifecycle + GetBucketLifecycle func(vpcId, s3ServiceId, bucketName, page, pageSize string) string + PutBucketLifecycle func(vpcId, s3ServiceId, bucketName string) string + DeleteBucketLifecycle func(vpcId, s3ServiceId, bucketName string) string + // Bucket CORS + GetBucketCORS func(vpcId, s3ServiceId, bucketName string) string + PutBucketCORS func(vpcId, s3ServiceId, bucketName string) string + CreateBucketCors func(vpcId, s3ServiceId, bucketName string) string + // Bucket ACL + GetBucketAcl func(vpcId, s3ServiceId, bucketName string) string + PutBucketAcl func(vpcId, s3ServiceId, bucketName string) string + + // Sub-user + ListSubUsers func(vpcId, s3ServiceId string) string + CreateSubUser func(vpcId, s3ServiceId string) string + UpdateSubUser func(vpcId, s3ServiceId, subUserId string) string + DeleteSubUser func(vpcId, s3ServiceId, subUserId string) string + DetailSubUser func(vpcId, s3ServiceId, subUserId string) string + CreateSubUserAccessKey func(vpcId, s3ServiceId, subUserId string) string + DeleteSubUserAccessKey func(vpcId, s3ServiceId, subUserId, accessKeyId string) string + // Access Key + ListAccessKeys func(vpcId, s3ServiceId string) string + CreateAccessKey func(vpcId, s3ServiceId string) string + DeleteAccessKey func(vpcId, s3ServiceId string) string }{ SSH: "/v1/user/sshs", Storage: func(vpcId string) string { @@ -219,4 +264,109 @@ var ApiPath = struct { GetFKEOSVersion: func(vpcId string, platform string) string { return fmt.Sprintf("/v1/xplat/fke/vpc/%s/m-fke/%s/get_k8s_versions", vpcId, platform) }, + + // Object Storage + // Common + CheckS3ServiceEnable: func(vpcId string) string { + fmt.Println("vpcId: ", vpcId) + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/check-service-enabled?check_unlimited=undefined", vpcId) + }, + + // Bucket + ListBuckets: func(vpcId, s3ServiceId string, page, pageSize int) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/buckets?page=%d&page_size=%d&s3_service_id=%s", 
vpcId, page, pageSize, s3ServiceId) + }, + CreateBucket: func(vpcId, s3ServiceId string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/buckets/create", vpcId, s3ServiceId) + }, + + DeleteBucket: func(vpcId, s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/buckets/%s/delete", vpcId, s3ServiceId, bucketName) + }, + + // Bucket Versioning + GetBucketVersioning: func(vpcId, s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/get-versioning", vpcId, s3ServiceId, bucketName) + }, + PutBucketVersioning: func(vpcId, s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/put-versioning", vpcId, s3ServiceId, bucketName) + }, + // Bucket Policy + GetBucketPolicy: func(vpcId, s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/get-policy", vpcId, s3ServiceId, bucketName) + }, + PutBucketPolicy: func(vpcId, s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/put-policy", vpcId, s3ServiceId, bucketName) + }, + // Bucket Static Website + GetBucketWebsite: func(vpcId, s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/get-config", vpcId, s3ServiceId, bucketName) + }, + PutBucketWebsite: func(vpcId, s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/put-config", vpcId, s3ServiceId, bucketName) + }, + DeleteBucketStaticWebsite: func(vpcId, s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/delete-config", vpcId, s3ServiceId, bucketName) + }, + // Bucket Lifecycle + GetBucketLifecycle: func(vpcId, s3ServiceId, bucketName, page, pageSize string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/lifecycles?page=%s&page_size=%s", vpcId, s3ServiceId, bucketName, page, pageSize) + }, + PutBucketLifecycle: func(vpcId, 
s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/create-bucket-lifecycle-configuration", vpcId, s3ServiceId, bucketName) + }, + DeleteBucketLifecycle: func(vpcId, s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/delete-bucket-lifecycle-configuration", vpcId, s3ServiceId, bucketName) + }, + // Bucket CORS + GetBucketCORS: func(vpcId, s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/cors", vpcId, s3ServiceId, bucketName) + }, + PutBucketCORS: func(vpcId, s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/put-bucket-cors", vpcId, s3ServiceId, bucketName) + }, + CreateBucketCors: func(vpcId, s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/create-bucket-cors", vpcId, s3ServiceId, bucketName) + }, + // Bucket ACL + GetBucketAcl: func(vpcId, s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/acl", vpcId, s3ServiceId, bucketName) + }, + PutBucketAcl: func(vpcId, s3ServiceId, bucketName string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/acl", vpcId, s3ServiceId, bucketName) + }, + // Sub-user + ListSubUsers: func(vpcId, serviceId string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/list", vpcId, serviceId) + }, + CreateSubUser: func(vpcId, s3ServiceId string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/create", vpcId, s3ServiceId) + }, + UpdateSubUser: func(vpcId, s3ServiceId, subUserId string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/%s/update", vpcId, s3ServiceId, subUserId) + }, + DeleteSubUser: func(vpcId, s3ServiceId, subUserId string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/%s/delete", vpcId, s3ServiceId, subUserId) + }, + CreateSubUserAccessKey: func(vpcId, s3ServiceId, subUserId string)
string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/%s/credentials/create", vpcId, s3ServiceId, subUserId) + }, + DeleteSubUserAccessKey: func(vpcId, s3ServiceId, subUserId, accessKeyId string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/%s/credentials/%s/delete", vpcId, s3ServiceId, subUserId, accessKeyId) + }, + DetailSubUser: func(vpcId, s3ServiceId, subUserId string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/%s/detail", vpcId, s3ServiceId, subUserId) + }, + + // Access Key + ListAccessKeys: func(vpcId, s3ServiceId string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/user/credentials?s3_service_id=%s", vpcId, s3ServiceId) + }, + CreateAccessKey: func(vpcId, s3ServiceId string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/user/credentials", vpcId, s3ServiceId) + }, + // https://console-api.fptcloud.com/api/v1/vmware/vpc/1dce0aa0-a78d-4e19-89a3-d688bcff7f1b/s3/d8c82109-3d17-4ac2-8b21-5fedb2d81c54/user/credentials/delete + DeleteAccessKey: func(vpcId, s3ServiceId string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/user/credentials/delete", vpcId, s3ServiceId) + }, } diff --git a/commons/client.go b/commons/client.go index bbf9406..4eee795 100644 --- a/commons/client.go +++ b/commons/client.go @@ -112,6 +112,9 @@ func (c *Client) SendRequest(req *http.Request) ([]byte, error) { body, err := io.ReadAll(resp.Body) c.LastJSONResponse = string(body) + fmt.Println("resp.StatusCode: ", resp.StatusCode) + fmt.Println("resp.BODY: ", string(body)) + fmt.Println("resp.URL: ", resp.Request.URL.String()) if resp.StatusCode >= 300 { return nil, HTTPError{Code: resp.StatusCode, Status: resp.Status, Reason: string(body)} diff --git a/fptcloud/object-storage/datasource_object_storage.go b/fptcloud/object-storage/datasource_object_storage.go new file mode 100644 index 0000000..37afbcb --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage.go @@ -0,0 +1,609 @@ +package 
fptcloud_object_storage + +import ( + "encoding/json" + "fmt" + common "terraform-provider-fptcloud/commons" + "time" +) + +// SubUserCreateRequest represents the request body for creating a sub-user +type SubUserCreateRequest struct { + Username string `json:"username"` + DisplayName string `json:"display_name"` + Email string `json:"email"` + Permissions []string `json:"permissions"` +} +type AccessKey struct { + Credentials []struct { + ID string `json:"id"` + Credentials []struct { + AccessKey string `json:"accessKey"` + Active bool `json:"active"` + CreatedDate interface{} `json:"createdDate"` + } `json:"credentials"` + } `json:"credentials"` +} +type CreateAccessKeyResponse struct { + Status bool `json:"status"` + Message string `json:"message,omitempty"` + Credential struct { + AccessKey string `json:"accessKey"` + SecretKey string `json:"secretKey"` + Active interface{} `json:"active"` + CreatedDate interface{} `json:"createdDate"` + } `json:"credential,omitempty"` +} +type SubUser struct { + Role string `json:"role"` + UserId string `json:"user_id,omitempty"` +} +type CommonResponse struct { + Status bool `json:"status"` + Message string `json:"message,omitempty"` +} +type CorsRule struct { + ID string `json:"ID,omitempty"` + AllowedOrigins []string `json:"AllowedOrigins"` + AllowedMethods []string `json:"AllowedMethods"` + MaxAgeSeconds int `json:"MaxAgeSeconds,omitempty"` + ExposeHeaders []string `json:"ExposeHeaders,omitempty"` + AllowedHeaders []string `json:"AllowedHeaders,omitempty"` +} +type BucketCors struct { + CorsRules []CorsRule `json:"CORSRules"` +} +type BucketCorsResponse struct { + Status bool `json:"status"` + Total int `json:"total"` + CorsRules []CorsRule `json:"cors_rules"` +} + +type BucketPolicyResponse struct { + Status bool `json:"status"` + Policy string `json:"policy"` +} +type BucketPolicyRequest struct { + Policy string `json:"policy"` +} +type Statement struct { + Sid string `json:"Sid"` + Effect string `json:"Effect"` + 
Principal map[string]interface{} `json:"Principal"` + Action []string `json:"Action"` + Resource []string `json:"Resource"` +} + +type BucketVersioningRequest struct { + Status string `json:"status"` // "Enabled" or "Suspended" +} + +type BucketAclResponse struct { + Status bool `json:"status"` + Owner struct { + DisplayName string `json:"DisplayName"` + ID string `json:"ID"` + } `json:"Owner"` + Grants []struct { + Grantee struct { + DisplayName string `json:"DisplayName"` + ID string `json:"ID"` + Type string `json:"Type"` + } `json:"Grantee"` + Permission string `json:"Permission"` + } `json:"Grants"` + CannedACL string `json:"CannedACL"` +} +type BucketAclRequest struct { + CannedAcl string `json:"cannedAcl"` + ApplyObjects bool `json:"applyObjects"` +} +type PutBucketAclResponse struct { + Status bool `json:"status"` + // TaskID may be empty if applyObjects is false, if applyObjects is true, the taskID will be returned + TaskID string `json:"taskId"` +} +type BucketWebsiteRequest struct { + Key string `json:"key"` + Suffix string `json:"suffix"` + Bucket string `json:"bucket"` +} +type BucketWebsiteResponse struct { + Status bool `json:"status"` + Config struct { + ResponseMetadata struct { + RequestID string `json:"RequestId"` + HostID string `json:"HostId"` + HTTPStatusCode int `json:"HTTPStatusCode"` + HTTPHeaders struct { + XAmzRequestID string `json:"x-amz-request-id"` + ContentType string `json:"content-type"` + ContentLength string `json:"content-length"` + Date string `json:"date"` + } `json:"HTTPHeaders"` + RetryAttempts int `json:"RetryAttempts"` + } `json:"ResponseMetadata"` + IndexDocument struct { + Suffix string `json:"Suffix"` + } `json:"IndexDocument"` + ErrorDocument struct { + Key string `json:"Key"` + } `json:"ErrorDocument"` + } `json:"config,omitempty"` +} + +type S3ServiceEnableResponse struct { + Data []struct { + S3ServiceName string `json:"s3_service_name"` + S3ServiceID string `json:"s3_service_id"` + S3Platform string 
`json:"s3_platform"` + DefaultUser interface{} `json:"default_user"` + MigrateQuota int `json:"migrate_quota"` + SyncQuota int `json:"sync_quota"` + RgwTotalNodes int `json:"rgw_total_nodes"` + RgwUserActiveNodes int `json:"rgw_user_active_nodes"` + HasUnusualConfig interface{} `json:"has_unusual_config"` + } `json:"data"` + Total int `json:"total"` +} + +// Bucket represents the response structure for a created bucket +type BucketRequest struct { + Name string `json:"name"` + Region string `json:"region"` + Versioning string `json:"versioning"` + Acl string `json:"acl"` +} +type ListBucketResponse struct { + Buckets []struct { + Name string `json:"Name"` + CreationDate time.Time `json:"CreationDate"` + IsEmpty bool `json:"isEmpty"` + S3ServiceID string `json:"s3_service_id"` + IsEnabledLogging bool `json:"isEnabledLogging"` + Endpoint string `json:"endpoint"` + } `json:"buckets"` + Total int `json:"total"` +} +type BucketLifecycleResponse struct { + Status bool `json:"status"` + Rules []struct { + Expiration struct { + Days int `json:"Days"` + } `json:"Expiration"` + ID string `json:"ID"` + Filter struct { + Prefix string `json:"Prefix"` + } `json:"Filter"` + Status string `json:"Status"` + AbortIncompleteMultipartUpload struct { + DaysAfterInitiation int `json:"DaysAfterInitiation"` + } `json:"AbortIncompleteMultipartUpload"` + } `json:"rules"` + Total int `json:"total"` +} + +type DetailSubUser struct { + UserID string `json:"user_id"` + Arn interface{} `json:"arn"` + Active bool `json:"active"` + Role string `json:"role"` + CreatedAt interface{} `json:"created_at"` + AccessKeys []string `json:"access_keys"` +} + +// ObjectStorageService defines the interface for object storage operations +type ObjectStorageService interface { + CheckServiceEnable(vpcId string) S3ServiceEnableResponse + + // Bucket + ListBuckets(vpcId, s3ServiceId string, page, pageSize int) ListBucketResponse + CreateBucket(req BucketRequest, vpcId, s3ServiceId string) CommonResponse + 
DeleteBucket(vpcId, s3ServiceId, bucketName string) error + + // Access key + ListAccessKeys(vpcId, s3ServiceId string) (AccessKey, error) + DeleteAccessKey(vpcId, s3ServiceId, accessKeyId string) error + CreateAccessKey(vpcId, s3ServiceId string) *CreateAccessKeyResponse + + // Sub user + CreateSubUser(req SubUser, vpcId, s3ServiceId string) (*SubUser, error) + DeleteSubUser(vpcId, s3ServiceId, subUserId string) error + ListSubUsers(vpcId, s3ServiceId string) ([]SubUser, error) + DetailSubUser(vpcId, s3ServiceId, subUserId string) *DetailSubUser + CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId string) *CreateAccessKeyResponse + DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId, accessKeyId string) CommonResponse + + // bucket configuration + PutBucketPolicy(vpcId, s3ServiceId, bucketName string, policy BucketPolicyRequest) CommonResponse + GetBucketPolicy(vpcId, s3ServiceId, bucketName string) *BucketPolicyResponse + + // CORS configuration + PutBucketCors(bucketName, vpcId, s3ServiceId string, cors CorsRule) (CommonResponse, error) + UpdateBucketCors(bucketName, vpcId, s3ServiceId string, cors BucketCors) (CommonResponse, error) + GetBucketCors(vpcId, s3ServiceId, bucketName string) (*BucketCors, error) + + // Versioning configuration + PutBucketVersioning(vpcId, s3ServiceId, bucketName string, versioning BucketVersioningRequest) error + GetBucketVersioning(vpcId, s3ServiceId, bucketName string) *BucketVersioningRequest + + // Acl configuration + PutBucketAcl(vpcId, s3ServiceId, bucketName string, acl BucketAclRequest) PutBucketAclResponse + GetBucketAcl(vpcId, s3ServiceId, bucketName string) (*BucketAclResponse, error) + + // Static website configuration + PutBucketWebsite(vpcId, s3ServiceId, bucketName string, website BucketWebsiteRequest) CommonResponse + GetBucketWebsite(vpcId, s3ServiceId, bucketName string) *BucketWebsiteResponse + DeleteBucketStaticWebsite(vpcId, s3ServiceId, bucketName string) CommonResponse + + // Lifecycle configuration + 
GetBucketLifecycle(vpcId, s3ServiceId, bucketName, page, pageSize string) (*BucketLifecycleResponse, error) + PutBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle interface{}) (*BucketLifecycleResponse, error) + DeleteBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle interface{}) (*BucketLifecycleResponse, error) +} + +// ObjectStorageServiceImpl is the implementation of ObjectStorageService +type ObjectStorageServiceImpl struct { + client *common.Client +} + +// NewObjectStorageService creates a new instance of ObjectStorageService +func NewObjectStorageService(client *common.Client) ObjectStorageService { + return &ObjectStorageServiceImpl{client: client} +} + +func (s *ObjectStorageServiceImpl) CheckServiceEnable(vpcId string) S3ServiceEnableResponse { + apiPath := common.ApiPath.CheckS3ServiceEnable(vpcId) + resp, err := s.client.SendGetRequest(apiPath) + if err != nil { + return S3ServiceEnableResponse{} + } + + var response S3ServiceEnableResponse + if err := json.Unmarshal(resp, &response); err != nil { + return S3ServiceEnableResponse{} + } + return response +} + +func (s *ObjectStorageServiceImpl) CreateBucket(req BucketRequest, vpcId, s3ServiceId string) CommonResponse { + + apiPath := common.ApiPath.CreateBucket(vpcId, s3ServiceId) + fmt.Println("apiPath", apiPath) + resp, err := s.client.SendPostRequest(apiPath, req) + if err != nil { + return CommonResponse{Status: false, Message: err.Error()} + } + + var bucket BucketRequest + err = json.Unmarshal(resp, &bucket) + if err != nil { + return CommonResponse{Status: false, Message: err.Error()} + } + + return CommonResponse{Status: true, Message: "Bucket created successfully"} +} + +// CreateSubUser creates a new sub-user +func (s *ObjectStorageServiceImpl) CreateSubUser(req SubUser, vpcId, s3ServiceId string) (*SubUser, error) { + apiPath := common.ApiPath.CreateSubUser(vpcId, s3ServiceId) + resp, err := s.client.SendPostRequest(apiPath, req) + if err != nil { + return nil, 
fmt.Errorf("failed to create sub-user: %v", err) + } + + var subUser SubUser + err = json.Unmarshal(resp, &subUser) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal sub-user response: %v", err) + } + + return &subUser, nil +} + +func (s *ObjectStorageServiceImpl) CreateAccessKey(vpcId, s3ServiceId string) *CreateAccessKeyResponse { + apiPath := common.ApiPath.CreateAccessKey(vpcId, s3ServiceId) + resp, err := s.client.SendPostRequest(apiPath, nil) + fmt.Println("resp", resp) + if err != nil { + return &CreateAccessKeyResponse{Status: false, Message: err.Error()} + } + + var accessKey CreateAccessKeyResponse + err = json.Unmarshal(resp, &accessKey) + if err != nil { + + return &CreateAccessKeyResponse{Status: false, Message: err.Error()} + } + return &accessKey +} + +func (s *ObjectStorageServiceImpl) ListBuckets(vpcId, s3ServiceId string, page, pageSize int) ListBucketResponse { + apiPath := common.ApiPath.ListBuckets(vpcId, s3ServiceId, page, pageSize) + resp, err := s.client.SendGetRequest(apiPath) + if err != nil { + return ListBucketResponse{Total: 0} + } + + var buckets ListBucketResponse + err = json.Unmarshal(resp, &buckets) + if err != nil { + return ListBucketResponse{Total: 0} + } + + return buckets +} + +func (s *ObjectStorageServiceImpl) ListSubUsers(vpcId, s3ServiceId string) ([]SubUser, error) { + apiPath := common.ApiPath.ListSubUsers(vpcId, s3ServiceId) + resp, err := s.client.SendGetRequest(apiPath) + if err != nil { + return nil, fmt.Errorf("failed to list sub-users: %v", err) + } + + var subUsers []SubUser + err = json.Unmarshal(resp, &subUsers) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal sub-user list response: %v", err) + } + + return subUsers, nil +} + +func (s *ObjectStorageServiceImpl) ListAccessKeys(vpcId, s3ServiceId string) (AccessKey, error) { + apiPath := common.ApiPath.ListAccessKeys(vpcId, s3ServiceId) + resp, err := s.client.SendGetRequest(apiPath) + if err != nil { + return AccessKey{}, 
fmt.Errorf("failed to list access keys: %v", err) + } + + var accessKeys AccessKey + err = json.Unmarshal(resp, &accessKeys) + if err != nil { + return AccessKey{}, fmt.Errorf("failed to unmarshal access key list response: %v", err) + } + + return accessKeys, nil +} + +func (s *ObjectStorageServiceImpl) DeleteBucket(vpcId, s3ServiceId, bucketName string) error { + apiPath := common.ApiPath.DeleteBucket(vpcId, s3ServiceId, bucketName) + if _, err := s.client.SendDeleteRequest(apiPath); err != nil { + return fmt.Errorf("failed to delete bucket: %v", err) + } + return nil +} + +func (s *ObjectStorageServiceImpl) DeleteAccessKey(vpcId, s3ServiceId, accessKeyId string) error { + apiPath := common.ApiPath.DeleteAccessKey(vpcId, s3ServiceId) + body := map[string]string{"accessKey": accessKeyId} + fmt.Println("-----------------") + fmt.Printf("[INFO] Deleting access key: %s\n", accessKeyId) + fmt.Printf("[INFO] vpc_id: %s\n", vpcId) + fmt.Printf("[INFO] body: %s\n", body) + fmt.Printf("[INFO] apiPath: %s\n", apiPath) + fmt.Println("-----------------") + if _, err := s.client.SendDeleteRequestWithBody(apiPath, body); err != nil { + return fmt.Errorf("failed to delete access key: %v", err) + } + return nil +} + +// Implement bucket policy methods +func (s *ObjectStorageServiceImpl) PutBucketPolicy(vpcId, s3ServiceId, bucketName string, policy BucketPolicyRequest) CommonResponse { + apiPath := common.ApiPath.PutBucketPolicy(vpcId, s3ServiceId, bucketName) + if _, err := s.client.SendPutRequest(apiPath, policy); err != nil { + return CommonResponse{Status: false} + } + return CommonResponse{Status: true} +} + +func (s *ObjectStorageServiceImpl) GetBucketPolicy(vpcId, s3ServiceId, bucketName string) *BucketPolicyResponse { + apiPath := common.ApiPath.GetBucketPolicy(vpcId, s3ServiceId, bucketName) + resp, err := s.client.SendGetRequest(apiPath) + if err != nil { + return &BucketPolicyResponse{Status: false} + } + + var policy BucketPolicyResponse + if err := 
json.Unmarshal(resp, &policy); err != nil { + return &BucketPolicyResponse{Status: false} + } + return &policy +} + +// Implement CORS methods +func (s *ObjectStorageServiceImpl) PutBucketCors(bucketName, vpcId, s3ServiceId string, cors CorsRule) (CommonResponse, error) { + apiPath := common.ApiPath.PutBucketCORS(vpcId, s3ServiceId, bucketName) + if _, err := s.client.SendPutRequest(apiPath, cors); err != nil { + return CommonResponse{Status: false}, fmt.Errorf("failed to update bucket CORS: %v", err) + } + return CommonResponse{Status: true}, nil +} + +func (s *ObjectStorageServiceImpl) UpdateBucketCors(bucketName, vpcId, s3ServiceId string, cors BucketCors) (CommonResponse, error) { + apiPath := common.ApiPath.PutBucketCORS(vpcId, s3ServiceId, bucketName) + if _, err := s.client.SendPutRequest(apiPath, cors); err != nil { + return CommonResponse{Status: false}, fmt.Errorf("failed to update bucket CORS: %v", err) + } + return CommonResponse{Status: true}, nil +} + +func (s *ObjectStorageServiceImpl) GetBucketCors(vpcId, s3ServiceId, bucketName string) (*BucketCors, error) { + apiPath := common.ApiPath.GetBucketCORS(vpcId, s3ServiceId, bucketName) + resp, err := s.client.SendGetRequest(apiPath) + if err != nil { + return nil, fmt.Errorf("failed to get bucket CORS: %v", err) + } + + var cors BucketCors + if err := json.Unmarshal(resp, &cors); err != nil { + return nil, fmt.Errorf("failed to unmarshal bucket CORS: %v", err) + } + return &cors, nil +} + +// Implement versioning methods +func (s *ObjectStorageServiceImpl) PutBucketVersioning(vpcId, s3ServiceId, bucketName string, versioning BucketVersioningRequest) error { + apiPath := common.ApiPath.PutBucketVersioning(vpcId, s3ServiceId, bucketName) + if _, err := s.client.SendPutRequest(apiPath, versioning); err != nil { + return fmt.Errorf("failed to put bucket versioning: %v", err) + } + return nil +} + +func (s *ObjectStorageServiceImpl) GetBucketVersioning(vpcId, s3ServiceId, bucketName string) 
*BucketVersioningRequest { + apiPath := common.ApiPath.GetBucketVersioning(vpcId, s3ServiceId, bucketName) + resp, err := s.client.SendGetRequest(apiPath) + if err != nil { + return &BucketVersioningRequest{} + } + + var versioning BucketVersioningRequest + if err := json.Unmarshal(resp, &versioning); err != nil { + return &BucketVersioningRequest{} + } + return &versioning +} + +func (s *ObjectStorageServiceImpl) PutBucketWebsite(vpcId, s3ServiceId, bucketName string, website BucketWebsiteRequest) CommonResponse { + apiPath := common.ApiPath.PutBucketWebsite(vpcId, s3ServiceId, bucketName) + if _, err := s.client.SendPutRequest(apiPath, website); err != nil { + return CommonResponse{Status: false} + } + return CommonResponse{Status: true} +} +func (s *ObjectStorageServiceImpl) DeleteBucketStaticWebsite(vpcId, s3ServiceId, bucketName string) CommonResponse { + apiPath := common.ApiPath.DeleteBucketStaticWebsite(vpcId, s3ServiceId, bucketName) + if _, err := s.client.SendDeleteRequest(apiPath); err != nil { + return CommonResponse{Status: false} + } + return CommonResponse{Status: true} +} +func (s *ObjectStorageServiceImpl) GetBucketWebsite(vpcId, s3ServiceId, bucketName string) *BucketWebsiteResponse { + apiPath := common.ApiPath.GetBucketWebsite(vpcId, s3ServiceId, bucketName) + resp, err := s.client.SendGetRequest(apiPath) + if err != nil { + return &BucketWebsiteResponse{Status: false} + } + + var website BucketWebsiteResponse + if err := json.Unmarshal(resp, &website); err != nil { + return &BucketWebsiteResponse{Status: false} + } + return &website +} + +func (s *ObjectStorageServiceImpl) PutBucketAcl(vpcId, s3ServiceId, bucketName string, acl BucketAclRequest) PutBucketAclResponse { + apiPath := common.ApiPath.PutBucketAcl(vpcId, s3ServiceId, bucketName) + resp, err := s.client.SendPutRequest(apiPath, acl) + if err != nil { + return PutBucketAclResponse{Status: false} + } + + var putBucketAclResponse PutBucketAclResponse + if err := json.Unmarshal(resp, 
&putBucketAclResponse); err != nil { + return PutBucketAclResponse{Status: false} + } + return putBucketAclResponse +} + +func (s *ObjectStorageServiceImpl) GetBucketAcl(vpcId, s3ServiceId, bucketName string) (*BucketAclResponse, error) { + apiPath := common.ApiPath.GetBucketAcl(vpcId, s3ServiceId, bucketName) + resp, err := s.client.SendGetRequest(apiPath) + if err != nil { + return nil, fmt.Errorf("failed to get bucket ACL: %v", err) + } + + var acl BucketAclResponse + if err := json.Unmarshal(resp, &acl); err != nil { + return nil, fmt.Errorf("failed to unmarshal bucket ACL: %v", err) + } + return &acl, nil +} + +func (s *ObjectStorageServiceImpl) DeleteSubUser(vpcId, s3ServiceId, subUserId string) error { + apiPath := common.ApiPath.DeleteSubUser(vpcId, s3ServiceId, subUserId) + if _, err := s.client.SendDeleteRequest(apiPath); err != nil { + return fmt.Errorf("failed to delete sub-user: %v", err) + } + return nil +} + +func (s *ObjectStorageServiceImpl) GetBucketLifecycle(vpcId, s3ServiceId, bucketName, page, pageSize string) (*BucketLifecycleResponse, error) { + apiPath := common.ApiPath.GetBucketLifecycle(vpcId, s3ServiceId, bucketName, page, pageSize) + resp, err := s.client.SendGetRequest(apiPath) + if err != nil { + return nil, fmt.Errorf("failed to get bucket lifecycle: %v", err) + } + + var bucketLifecycle BucketLifecycleResponse + if err := json.Unmarshal(resp, &bucketLifecycle); err != nil { + return nil, fmt.Errorf("failed to unmarshal bucket lifecycle: %v", err) + } + return &bucketLifecycle, nil +} + +func (s *ObjectStorageServiceImpl) PutBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle interface{}) (*BucketLifecycleResponse, error) { + apiPath := common.ApiPath.PutBucketLifecycle(vpcId, s3ServiceId, bucketName) + resp, err := s.client.SendPutRequest(apiPath, lifecycle) + if err != nil { + return nil, fmt.Errorf("failed to put bucket lifecycle: %v", err) + } + + var bucketLifecycle BucketLifecycleResponse + if err := 
json.Unmarshal(resp, &bucketLifecycle); err != nil { + return nil, fmt.Errorf("failed to unmarshal bucket lifecycle: %v", err) + } + return &bucketLifecycle, nil +} + +func (s *ObjectStorageServiceImpl) DeleteBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle interface{}) (*BucketLifecycleResponse, error) { + apiPath := common.ApiPath.DeleteBucketLifecycle(vpcId, s3ServiceId, bucketName) + resp, err := s.client.SendPutRequest(apiPath, lifecycle) + if err != nil { + return nil, fmt.Errorf("failed to delete bucket lifecycle: %v", err) + } + + var bucketLifecycle BucketLifecycleResponse + if err := json.Unmarshal(resp, &bucketLifecycle); err != nil { + return nil, fmt.Errorf("failed to unmarshal bucket lifecycle: %v", err) + } + return &bucketLifecycle, nil +} + +func (s *ObjectStorageServiceImpl) CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId string) *CreateAccessKeyResponse { + apiPath := common.ApiPath.CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId) + resp, err := s.client.SendPostRequest(apiPath, nil) + if err != nil { + return nil + } + + var accessKey CreateAccessKeyResponse + if err := json.Unmarshal(resp, &accessKey); err != nil { + return nil + } + return &accessKey +} + +func (s *ObjectStorageServiceImpl) DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId, accessKeyId string) CommonResponse { + apiPath := common.ApiPath.DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId, accessKeyId) + if _, err := s.client.SendDeleteRequest(apiPath); err != nil { + return CommonResponse{Status: false} + } + return CommonResponse{Status: true} +} + +func (s *ObjectStorageServiceImpl) DetailSubUser(vpcId, s3ServiceId, subUserId string) *DetailSubUser { + apiPath := common.ApiPath.DetailSubUser(vpcId, s3ServiceId, subUserId) + resp, err := s.client.SendGetRequest(apiPath) + if err != nil { + return nil + } + + var detail DetailSubUser + if err := json.Unmarshal(resp, &detail); err != nil { + return nil + } + return &detail +} diff --git 
a/fptcloud/object-storage/datasource_object_storage_access_key.go b/fptcloud/object-storage/datasource_object_storage_access_key.go new file mode 100644 index 0000000..ecb442a --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage_access_key.go @@ -0,0 +1,102 @@ +package fptcloud_object_storage + +import ( + "context" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// data_source_object_storage_access_key.go +func DataSourceAccessKey() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceAccessKeyRead, + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + }, + "region_name": { + Type: schema.TypeString, + Required: true, + Description: "The region name to create the access key", + }, + "access_keys": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "credentials": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_key": { + Type: schema.TypeString, + Computed: true, + }, + "active": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceAccessKeyRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + _, err := service.ListAccessKeys(vpcId, s3ServiceDetail.S3ServiceId) + if err != nil { + return diag.FromErr(err) + } + + // if len(accessKeys.Credentials) > 0 { + // d.SetId(fmt.Sprintf("access_keys_%d", len(accessKeys))) + // if err := d.Set("access_keys", flattenAccessKeys(accessKeys)); err != nil { + // return diag.FromErr(err) + // } 
+ // } + + return nil +} + +// func flattenAccessKeys(accessKeys AccessKey) []interface{} { +// var result []interface{} +// for _, ak := range accessKeys.Credentials { +// for _, cred := range ak.Credentials { +// credMap := map[string]interface{}{ +// "id": cred.ID, +// "credentials": flattenCredentials(cred.Credentials), +// } +// result = append(result, credMap) +// } +// } +// return result +// } + +func flattenCredentials(credentials []struct { + AccessKey string `json:"accessKey"` + Active bool `json:"active"` + CreatedDate interface{} `json:"createdDate"` +}) []interface{} { + var result []interface{} + for _, cred := range credentials { + credMap := map[string]interface{}{ + "access_key": cred.AccessKey, + "active": cred.Active, + } + result = append(result, credMap) + } + return result +} diff --git a/fptcloud/object-storage/datasource_object_storage_bucket.go b/fptcloud/object-storage/datasource_object_storage_bucket.go new file mode 100644 index 0000000..4f39169 --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage_bucket.go @@ -0,0 +1,71 @@ +package fptcloud_object_storage + +import ( + "context" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceBucket() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceBucketRead, + Schema: map[string]*schema.Schema{ + "vpd_id": { + Type: schema.TypeString, + Required: true, + Description: "The VPC ID", + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the bucket", + }, + "region": { + Type: schema.TypeString, + Computed: true, + Description: "Region where the bucket is located", + }, + "versioning": { + Type: schema.TypeBool, + Computed: true, + Description: "Whether versioning is enabled", + }, + "acl": { + Type: schema.TypeString, + Computed: true, + Description: "Access control list", + }, + }, + } +} + 
+func dataSourceBucketRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + page := 1 + if d.Get("page") != nil { + page = d.Get("page").(int) + } + pageSize := 25 + if d.Get("page_size") != nil { + pageSize = d.Get("page_size").(int) + } + buckets := service.ListBuckets(vpcId, s3ServiceDetail.S3ServiceId, page, pageSize) + if buckets.Total == 0 { + return diag.Errorf("no buckets found") + } + + bucketName := d.Get("name").(string) + for _, bucket := range buckets.Buckets { + if bucket.Name == bucketName { + d.SetId(bucket.Name) + return nil + } + } + + return diag.Errorf("bucket with name %s not found", bucketName) +} diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_cors.go b/fptcloud/object-storage/datasource_object_storage_bucket_cors.go new file mode 100644 index 0000000..e94916c --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage_bucket_cors.go @@ -0,0 +1,93 @@ +package fptcloud_object_storage + +import ( + "context" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceBucketCors() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceBucketCorsRead, + Schema: map[string]*schema.Schema{ + "bucket_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the bucket", + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC ID", + }, + "cors_rule": { + Type: schema.TypeList, + Required: true, + Description: "The bucket cors rule", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_headers": { + Type: schema.TypeList, + Required: true, + 
Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "allowed_methods": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "allowed_origins": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "expose_headers": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "max_age_seconds": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceBucketCorsRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + bucketName := d.Get("bucket_name").(string) + + corsRule, err := service.GetBucketCors(vpcId, s3ServiceDetail.S3ServiceId, bucketName) + if err != nil { + return diag.FromErr(err) + } + + if corsRule == nil { + d.SetId("") + return nil + } + + d.SetId(bucketName) + d.Set("cors_rule", corsRule) + return nil + +} diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_policy.go b/fptcloud/object-storage/datasource_object_storage_bucket_policy.go new file mode 100644 index 0000000..15c323a --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage_bucket_policy.go @@ -0,0 +1,55 @@ +package fptcloud_object_storage + +import ( + "context" + "fmt" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceBucketPolicy() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceBucketPolicyRead, + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + Description: "The VPC ID", + }, + "bucket_name": { + Type: schema.TypeString, + 
Required: true, + Description: "Name of the bucket to fetch policy for", + }, + "policy": { + Type: schema.TypeString, + Computed: true, + Description: "The bucket policy in JSON format", + }, + }, + } +} + +func dataSourceBucketPolicyRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + + bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + policyResponse := service.GetBucketPolicy(vpcId, bucketName, s3ServiceDetail.S3ServiceId) + if !policyResponse.Status { + return diag.Errorf("failed to get bucket policy for bucket %s", bucketName) + } + + // Set the ID to be a combination of bucket name to ensure unique data source + d.SetId(fmt.Sprintf("bucket_policy_%s", bucketName)) + + if err := d.Set("policy", policyResponse.Policy); err != nil { + return diag.FromErr(err) + } + + return nil +} diff --git a/fptcloud/object-storage/datasource_object_storage_lifecycle.go b/fptcloud/object-storage/datasource_object_storage_lifecycle.go new file mode 100644 index 0000000..7f3544d --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage_lifecycle.go @@ -0,0 +1,75 @@ +package fptcloud_object_storage + +import ( + "context" + "fmt" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceBucketLifecycle() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceBucketLifecycle, + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + Description: "The VPC ID", + }, + "bucket_name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the bucket to fetch policy for", + }, + "policy": { + Type: schema.TypeString, + Computed: true, + Description: 
"The bucket policy in JSON format", + }, + "region_name": { + Type: schema.TypeString, + Required: false, + Default: "HCM-02", + Optional: true, + Description: "The region name of the bucket", + }, + "page_size": { + Type: schema.TypeString, + Optional: true, + Default: "25", + Description: "The number of items to return in each page", + }, + "page": { + Type: schema.TypeString, + Optional: true, + Default: "1", + Description: "The page number", + }, + }, + } +} + +func dataSourceBucketLifecycle(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + + bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + page := d.Get("page").(string) + pageSize := d.Get("page_size").(string) + + lifeCycleResponse, err := service.GetBucketLifecycle(vpcId, s3ServiceDetail.S3ServiceId, bucketName, page, pageSize) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(fmt.Sprintf("%s-%s", vpcId, bucketName)) + if err := d.Set("policy", lifeCycleResponse.Rules); err != nil { + return diag.FromErr(err) + } + + return nil +} diff --git a/fptcloud/object-storage/datasource_object_storage_static_website.go b/fptcloud/object-storage/datasource_object_storage_static_website.go new file mode 100644 index 0000000..00c3692 --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage_static_website.go @@ -0,0 +1,144 @@ +package fptcloud_object_storage + +import ( + "context" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceBucketStaticWebsite() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceBucketStaticWebsite, + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, 
+ Description: "The VPC ID", + }, + "bucket_name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the bucket to fetch policy for", + }, + "region_name": { + Type: schema.TypeString, + Required: false, + Default: "HCM-02", + Optional: true, + Description: "The region name of the bucket", + }, + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "Status of the bucket website configuration", + }, + "request_id": { + Type: schema.TypeString, + Computed: true, + Description: "Request ID of the operation", + }, + "host_id": { + Type: schema.TypeString, + Computed: true, + Description: "Host ID of the operation", + }, + "http_status_code": { + Type: schema.TypeInt, + Computed: true, + Description: "HTTP status code of the operation", + }, + "http_headers": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "HTTP headers of the response", + }, + "retry_attempts": { + Type: schema.TypeInt, + Computed: true, + Description: "Number of retry attempts", + }, + "index_document": { + Type: schema.TypeString, + Computed: true, + Description: "Suffix for index document", + ForceNew: true, + }, + "error_document": { + Type: schema.TypeString, + Computed: true, + Description: "Key for error document", + ForceNew: true, + }, + }, + } +} + +func dataSourceBucketStaticWebsite(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + + bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + + staticWebsiteResponse := service.GetBucketWebsite(vpcId, bucketName, s3ServiceDetail.S3ServiceId) + if !staticWebsiteResponse.Status { + return diag.Errorf("failed to get bucket policy for bucket %s", bucketName) + } + + d.SetId(bucketName) + + // Set the computed values + if err := 
d.Set("status", staticWebsiteResponse.Status); err != nil { + return diag.FromErr(err) + } + + if staticWebsiteResponse.Config.ResponseMetadata.RequestID != "" { + if err := d.Set("request_id", staticWebsiteResponse.Config.ResponseMetadata.RequestID); err != nil { + return diag.FromErr(err) + } + } + + if staticWebsiteResponse.Config.ResponseMetadata.HostID != "" { + if err := d.Set("host_id", staticWebsiteResponse.Config.ResponseMetadata.HostID); err != nil { + return diag.FromErr(err) + } + } + + if err := d.Set("http_status_code", staticWebsiteResponse.Config.ResponseMetadata.HTTPStatusCode); err != nil { + return diag.FromErr(err) + } + + headers := map[string]string{ + "x-amz-request-id": staticWebsiteResponse.Config.ResponseMetadata.HTTPHeaders.XAmzRequestID, + "content-type": staticWebsiteResponse.Config.ResponseMetadata.HTTPHeaders.ContentType, + "content-length": staticWebsiteResponse.Config.ResponseMetadata.HTTPHeaders.ContentLength, + "date": staticWebsiteResponse.Config.ResponseMetadata.HTTPHeaders.Date, + } + if err := d.Set("http_headers", headers); err != nil { + return diag.FromErr(err) + } + + if err := d.Set("retry_attempts", staticWebsiteResponse.Config.ResponseMetadata.RetryAttempts); err != nil { + return diag.FromErr(err) + } + + if staticWebsiteResponse.Config.IndexDocument.Suffix != "" { + if err := d.Set("index_document", staticWebsiteResponse.Config.IndexDocument.Suffix); err != nil { + return diag.FromErr(err) + } + } + + if staticWebsiteResponse.Config.ErrorDocument.Key != "" { + if err := d.Set("error_document", staticWebsiteResponse.Config.ErrorDocument.Key); err != nil { + return diag.FromErr(err) + } + } + + return nil +} diff --git a/fptcloud/object-storage/datasource_object_storage_sub_user.go b/fptcloud/object-storage/datasource_object_storage_sub_user.go new file mode 100644 index 0000000..f59f439 --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage_sub_user.go @@ -0,0 +1,57 @@ +package fptcloud_object_storage + 
+import ( + "context" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +// datasource_object_storage_sub_user.go +func DataSourceSubUser() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceSubUserRead, + Schema: map[string]*schema.Schema{ + "role": { + Type: schema.TypeString, + Required: true, + Description: "Role of the sub-user", + }, + "user_id": { + Type: schema.TypeString, + Description: "ID of the sub-user", + ForceNew: true, + Required: true, + }, + "vpd_id": { + Type: schema.TypeString, + Required: true, + Description: "The VPC ID", + }, + }, + } +} + +func dataSourceSubUserRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + + subUsers, err := service.ListSubUsers(vpcId, s3ServiceDetail.S3ServiceId) + if err != nil { + return diag.FromErr(err) + } + + role := d.Get("role").(string) + for _, user := range subUsers { + if user.Role == role { + d.SetId(user.UserId) + d.Set("user_id", user.UserId) + return nil + } + } + + return diag.Errorf("sub-user with role %s not found", role) +} diff --git a/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go b/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go new file mode 100644 index 0000000..ab4888e --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go @@ -0,0 +1,93 @@ +package fptcloud_object_storage + +import ( + "context" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceSubUserDetail() *schema.Resource { + return &schema.Resource{ + 
ReadContext: dataSourceSubUserDetailRead, + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + Description: "The VPC ID", + }, + "s3_service_id": { + Type: schema.TypeString, + Required: true, + Description: "The S3 service ID", + }, + "sub_user_id": { + Type: schema.TypeString, + Required: true, + Description: "The sub-user ID", + }, + "user_id": { + Type: schema.TypeString, + Computed: true, + Description: "The sub-user ID", + }, + "arn": { + Type: schema.TypeString, + Computed: true, + Description: "The sub-user ARN", + }, + "active": { + Type: schema.TypeBool, + Computed: true, + Description: "Whether the sub-user is active", + }, + "role": { + Type: schema.TypeString, + Computed: true, + Description: "The sub-user's role", + }, + "created_at": { + Type: schema.TypeString, + Computed: true, + Description: "The sub-user's creation date", + }, + "access_keys": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: "The sub-user's access keys", + }, + }, + } +} + +func dataSourceSubUserDetailRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + + vpcId := d.Get("vpc_id").(string) + s3ServiceId := d.Get("s3_service_id").(string) + subUserId := d.Get("sub_user_id").(string) + + subUser := service.DetailSubUser(vpcId, s3ServiceId, subUserId) + if subUser == nil { + return diag.Errorf("sub-user with ID %s not found", subUserId) + } + + d.SetId(subUser.UserID) + d.Set("user_id", subUser.UserID) + if subUser.Arn != nil { + d.Set("arn", subUser.Arn) + } + d.Set("active", subUser.Active) + d.Set("role", subUser.Role) + if subUser.CreatedAt != nil { + d.Set("created_at", subUser.CreatedAt) + } + d.Set("access_keys", subUser.AccessKeys) + + return nil +} diff --git a/fptcloud/object-storage/datasource_object_storage_versioning.go 
b/fptcloud/object-storage/datasource_object_storage_versioning.go new file mode 100644 index 0000000..d45b2e8 --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage_versioning.go @@ -0,0 +1,52 @@ +package fptcloud_object_storage + +import ( + "context" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceBucketVersioning() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceBucketVersioningRead, + Schema: map[string]*schema.Schema{ + "bucket_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the bucket", + }, + "vpd_id": { + Type: schema.TypeString, + Required: true, + Description: "The VPC ID", + }, + "enabled": { + Type: schema.TypeBool, + Computed: true, + Description: "Enable or suspend versioning", + }, + }, + } +} + +func dataSourceBucketVersioningRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + + vpcId := d.Get("vpc_id").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + bucketName := d.Get("bucket_name").(string) + + versioning := service.GetBucketVersioning(vpcId, bucketName, s3ServiceDetail.S3ServiceId) + if versioning == nil { + return diag.Errorf("failed to get bucket versioning for bucket %s", bucketName) + } + + d.SetId(bucketName) + d.Set("enabled", versioning.Status == "Enabled") + + return nil +} diff --git a/fptcloud/object-storage/resource_access_key.go b/fptcloud/object-storage/resource_access_key.go new file mode 100644 index 0000000..fc3586a --- /dev/null +++ b/fptcloud/object-storage/resource_access_key.go @@ -0,0 +1,115 @@ +package fptcloud_object_storage + +import ( + "context" + "fmt" + common "terraform-provider-fptcloud/commons" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ResourceAccessKey() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceAccessKeyCreate, + ReadContext: resourceAccessKeyRead, + DeleteContext: resourceAccessKeyDelete, + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC ID", + }, + "access_key_id": { + Type: schema.TypeString, + Required: false, + Computed: true, + ForceNew: true, + Description: "The access key ID", + }, + "secret_access_key": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: "The secret access key", + }, + "region_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The region name to create the access key", + }, + "create_access_key_response": { + Type: schema.TypeString, + Computed: true, + Description: "The create access key response", + }, + }, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + } +} +func resourceAccessKeyCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + resp := service.CreateAccessKey(vpcId, s3ServiceDetail.S3ServiceId) + var createAccessKeyResponse CreateAccessKeyResponse + if resp.Credential.AccessKey != "" && resp.Credential.SecretKey != "" { + createAccessKeyResponse.Credential.AccessKey = resp.Credential.AccessKey + createAccessKeyResponse.Credential.SecretKey = resp.Credential.SecretKey + } + if resp.Message != "" { + createAccessKeyResponse.Message = resp.Message + } + createAccessKeyResponse.Status = resp.Status + fmt.Println("Create access key response: ", 
createAccessKeyResponse) + + p := fmt.Sprintf("%v", createAccessKeyResponse) + d.Set("access_key_id", createAccessKeyResponse.Credential.AccessKey) + d.Set("secret_access_key", createAccessKeyResponse.Credential.SecretKey) + d.SetId(resp.Credential.AccessKey) + d.Set("create_access_key_response", p) + + return nil +} + +func resourceAccessKeyRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + s3ServiceId := getServiceEnableRegion(service, vpcId, regionName).S3ServiceId + resp, err := service.ListAccessKeys(vpcId, s3ServiceId) + if err != nil { + return diag.FromErr(err) + } + secretAccessKey := d.Get("secret_access_key").(string) + accessKeyId := d.Get("access_key_id").(string) + for _, accessKey := range resp.Credentials { + for _, key := range accessKey.Credentials { + if key.AccessKey == accessKeyId { + d.Set("access_key_id", key.AccessKey) + d.Set("secret_access_key", secretAccessKey) + break + } + } + } + return nil +} + +func resourceAccessKeyDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + accessKeyId := d.Get("access_key_id").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + + service.DeleteAccessKey(vpcId, s3ServiceDetail.S3ServiceId, accessKeyId) + return nil +} diff --git a/fptcloud/object-storage/resource_bucket.go b/fptcloud/object-storage/resource_bucket.go new file mode 100644 index 0000000..295b542 --- /dev/null +++ b/fptcloud/object-storage/resource_bucket.go @@ -0,0 +1,131 @@ +package fptcloud_object_storage + +import ( + "context" + "fmt" + common "terraform-provider-fptcloud/commons" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +type S3ServiceDetail struct { + S3ServiceName string + S3ServiceId string + S3Platform string +} + +func ResourceBucket() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceBucketCreate, + DeleteContext: resourceBucketDelete, + ReadContext: dataSourceBucketRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The name of the bucket. Bucket names must be unique within an account.", + }, + "versioning": { + Type: schema.TypeString, + Optional: true, + Default: "", + ForceNew: true, + Description: "The versioning state of the bucket. Accepted values are Enabled or Suspended, default was not set.", + }, + "region_name": { + Type: schema.TypeString, + Required: false, + // Default: "HCM-02" if not provided + Default: "HCM-02", + Optional: true, + ForceNew: true, + Description: "The region name that's are the same with the region name in the S3 service.", + }, + "acl": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "private", + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func getServiceEnableRegion(objectStorageService ObjectStorageService, vpcId, regionName string) S3ServiceDetail { + serviceEnable := objectStorageService.CheckServiceEnable(vpcId) + if serviceEnable.Total == 0 { + return S3ServiceDetail{} + } + + var s3ServiceDetail S3ServiceDetail + for _, service := range serviceEnable.Data { + if service.S3ServiceName == regionName { + s3ServiceDetail.S3ServiceName = service.S3ServiceName + s3ServiceDetail.S3ServiceId = service.S3ServiceID + s3ServiceDetail.S3Platform = service.S3Platform + break + } + } + return s3ServiceDetail +} + +func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := 
m.(*common.Client) + objectStorageService := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + + req := BucketRequest{ + Name: d.Get("name").(string), + Versioning: d.Get("versioning").(string), + Acl: d.Get("acl").(string), + } + s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } + + bucket := objectStorageService.CreateBucket(req, vpcId, s3ServiceDetail.S3ServiceId) + if !bucket.Status { + return diag.Errorf(bucket.Message) + } + return resourceBucketRead(ctx, d, m) +} +func resourceBucketRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + objectStorageService := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + + bucket := objectStorageService.ListBuckets(vpcId, s3ServiceDetail.S3ServiceId, 1, 99999) + if bucket.Total == 0 { + return diag.Errorf("no buckets found") + } + for _, b := range bucket.Buckets { + if b.Name == d.Get("name").(string) { + d.SetId(b.Name) + d.Set("name", b.Name) + return nil + } + } + return diag.Errorf("bucket with name %s not found", d.Get("name").(string)) +} +func resourceBucketDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + objectStorageService := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + bucketName := d.Get("name").(string) + s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + + err := objectStorageService.DeleteBucket(bucketName, vpcId, s3ServiceDetail.S3ServiceId) + if err != nil { + return diag.FromErr(err) + } + return nil +} diff --git a/fptcloud/object-storage/resource_bucket_cors.go 
b/fptcloud/object-storage/resource_bucket_cors.go new file mode 100644 index 0000000..51124d3 --- /dev/null +++ b/fptcloud/object-storage/resource_bucket_cors.go @@ -0,0 +1,146 @@ +package fptcloud_object_storage + +import ( + "context" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ResourceBucketCors() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceBucketCorsCreate, + UpdateContext: resourceBucketCorsUpdate, + DeleteContext: resourceBucketCorsDelete, + ReadContext: dataSourceBucketCorsRead, + Schema: map[string]*schema.Schema{ + "bucket_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the bucket", + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC ID", + }, "cors_rule": { + Type: schema.TypeList, + Required: true, + Description: "The bucket cors rule", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_headers": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "allowed_methods": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "allowed_origins": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "expose_headers": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "max_age_seconds": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func resourceBucketCorsCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, 
d.Get("region_name").(string)) + + bucketName := d.Get("bucket_name").(string) + corsRule := d.Get("cors_rule").([]interface{}) + + cors := make([]CorsRule, 0) + for _, rule := range corsRule { + r := rule.(map[string]interface{}) + cors = append(cors, CorsRule{ + AllowedHeaders: r["allowed_headers"].([]string), + AllowedMethods: r["allowed_methods"].([]string), + AllowedOrigins: r["allowed_origins"].([]string), + ExposeHeaders: r["expose_headers"].([]string), + MaxAgeSeconds: r["max_age_seconds"].(int), + ID: "", // should implement later + }) + } + + _, err := service.PutBucketCors(bucketName, vpcId, s3ServiceDetail.S3ServiceId, CorsRule{ + AllowedHeaders: cors[0].AllowedHeaders, + AllowedMethods: cors[0].AllowedMethods, + AllowedOrigins: cors[0].AllowedOrigins, + ExposeHeaders: cors[0].ExposeHeaders, + }) + if err != nil { + return diag.Errorf("failed to create bucket cors for bucket %s", bucketName) + } + + d.SetId(bucketName) + return nil +} + +func resourceBucketCorsUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + + bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + + corsRule := d.Get("cors_rule").([]interface{}) + + cors := make([]CorsRule, 0) + for _, rule := range corsRule { + r := rule.(map[string]interface{}) + cors = append(cors, CorsRule{ + AllowedHeaders: r["allowed_headers"].([]string), + AllowedMethods: r["allowed_methods"].([]string), + AllowedOrigins: r["allowed_origins"].([]string), + ExposeHeaders: r["expose_headers"].([]string), + MaxAgeSeconds: r["max_age_seconds"].(int), + ID: "random-string-id", // should implement later + }) + } + + _, err := service.UpdateBucketCors(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketCors{ + CorsRules: cors, + }) + if err != nil { + return diag.Errorf("failed to update 
bucket cors for bucket %s", bucketName) + } + + d.SetId(bucketName) + return nil +} + +func resourceBucketCorsDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + return resourceBucketCorsUpdate(ctx, d, m) +} diff --git a/fptcloud/object-storage/resource_bucket_policy.go b/fptcloud/object-storage/resource_bucket_policy.go new file mode 100644 index 0000000..e02b48e --- /dev/null +++ b/fptcloud/object-storage/resource_bucket_policy.go @@ -0,0 +1,91 @@ +package fptcloud_object_storage + +import ( + "context" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ResourceBucketPolicy() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceBucketPolicyCreate, + UpdateContext: resourceBucketPolicyUpdate, + DeleteContext: resourceBucketPolicyDelete, + ReadContext: dataSourceBucketPolicyRead, + Schema: map[string]*schema.Schema{ + "bucket_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the bucket", + }, + "policy": { + Type: schema.TypeString, + Required: true, + Description: "The bucket policy in JSON format", + }, + "region_name": { + Type: schema.TypeString, + Required: false, + Default: "HCM-02", + Optional: true, + ForceNew: true, + Description: "The region name of the bucket", + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC ID", + }, + }, + } +} + +func resourceBucketPolicyCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + + bucketName := d.Get("bucket_name").(string) + policy := d.Get("policy").(string) + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + + resp := 
service.PutBucketPolicy(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketPolicyRequest{ + Policy: policy, + }) + + if !resp.Status { + return diag.Errorf("failed to create bucket policy for bucket %s", bucketName) + } + + d.SetId(bucketName) + return nil +} + +func resourceBucketPolicyUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + return resourceBucketPolicyCreate(ctx, d, m) +} + +func resourceBucketPolicyDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + + bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + + resp := service.PutBucketPolicy(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketPolicyRequest{ + Policy: "", + }) + + if !resp.Status { + return diag.Errorf("failed to delete bucket policy for bucket %s", d.Id()) + } + + return nil +} diff --git a/fptcloud/object-storage/resource_bucket_static_website.go b/fptcloud/object-storage/resource_bucket_static_website.go new file mode 100644 index 0000000..56fafe6 --- /dev/null +++ b/fptcloud/object-storage/resource_bucket_static_website.go @@ -0,0 +1,108 @@ +package fptcloud_object_storage + +import ( + "context" + "fmt" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ResourceBucketStaticWebsite() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceBucketStaticWebsiteCreate, + ReadContext: dataSourceBucketStaticWebsite, + DeleteContext: resourceDeleteBucketStaticWebsite, + //UpdateContext: nil, + Schema: map[string]*schema.Schema{ + "bucket_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the bucket", + }, + 
"region_name": { + Type: schema.TypeString, + Required: false, + Default: "HCM-02", + Optional: true, + ForceNew: true, + Description: "The region name of the bucket", + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC ID", + }, + "index_document_suffix": { + Type: schema.TypeString, + Optional: true, + Default: "index.html", + ForceNew: true, + Description: "Suffix that is appended to a request that is for a directory", + }, + "error_document_key": { + Type: schema.TypeString, + Optional: true, + Default: "error.html", + ForceNew: true, + Description: "The object key name to use when a 4XX class error occurs", + }, + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "The status after configuring the bucket website", + }, + }, + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + } +} + +func resourceBucketStaticWebsiteCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + + bucketName := d.Get("bucket_name").(string) + + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + indexDocument := d.Get("index_document_suffix").(string) + errorDocument := d.Get("error_document_key").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + putBucketWebsite := service.PutBucketWebsite(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketWebsiteRequest{ + Bucket: bucketName, + Suffix: indexDocument, + Key: errorDocument, + }) + fmt.Println("\n Put bucket website response: \n", putBucketWebsite) + + if !putBucketWebsite.Status { + diag.Errorf("failed to create bucket website for bucket %s", bucketName) + d.Set("status", false) + return nil + } + d.Set("status", true) + d.SetId(bucketName) + return nil +} + +func resourceDeleteBucketStaticWebsite(ctx context.Context, d *schema.ResourceData, m 
interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + + bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + + resp := service.DeleteBucketStaticWebsite(vpcId, s3ServiceDetail.S3ServiceId, bucketName) + if !resp.Status { + return diag.Errorf("failed to delete bucket website for bucket %s", bucketName) + } + + return nil +} diff --git a/fptcloud/object-storage/resource_bucket_versioning.go b/fptcloud/object-storage/resource_bucket_versioning.go new file mode 100644 index 0000000..4f133fa --- /dev/null +++ b/fptcloud/object-storage/resource_bucket_versioning.go @@ -0,0 +1,74 @@ +package fptcloud_object_storage + +import ( + "context" + "fmt" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ResourceBucketVersioning() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceBucketVersioningCreate, + ReadContext: dataSourceBucketVersioningRead, + DeleteContext: resourceBucketVersioningCreate, + Schema: map[string]*schema.Schema{ + "bucket_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the bucket", + }, + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Enable or suspend versioning", + ForceNew: true, // Marking this field as ForceNew to ensure that the resource is recreated when the value is changed + }, + "region_name": { + Type: schema.TypeString, + Required: false, + Default: "HCM-02", + Optional: true, + ForceNew: true, + Description: "The region name of the bucket", + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC ID", + }, + }, + } +} + +func resourceBucketVersioningCreate(ctx context.Context, d 
*schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + + bucketName := d.Get("bucket_name").(string) + enabled := d.Get("enabled").(bool) + + status := "Suspended" + if enabled { + status = "Enabled" + } + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + + err := service.PutBucketVersioning(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketVersioningRequest{ + Status: status, + }) + + if err != nil { + return diag.FromErr(err) + } + + d.SetId(bucketName) + fmt.Println("Bucket versioning is updated for bucket", bucketName) + return nil +} diff --git a/fptcloud/object-storage/resource_sub_user.go b/fptcloud/object-storage/resource_sub_user.go new file mode 100644 index 0000000..0a7b0fd --- /dev/null +++ b/fptcloud/object-storage/resource_sub_user.go @@ -0,0 +1,112 @@ +package fptcloud_object_storage + +import ( + "context" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ResourceSubUser() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceSubUserCreate, + ReadContext: dataSourceSubUserRead, + DeleteContext: resourceSubUserDelete, + Schema: map[string]*schema.Schema{ + "role": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "user_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "region_name": { + Type: schema.TypeString, + Required: false, + Default: "HCM-02", + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceSubUserCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + objectStorageService := NewObjectStorageService(client) 
+ + vpcId := d.Get("vpc_id").(string) + req := SubUser{ + Role: d.Get("role").(string), + UserId: d.Get("user_id").(string), + } + s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + + subUser, err := objectStorageService.CreateSubUser(req, vpcId, s3ServiceDetail.S3ServiceId) + if err != nil { + return diag.FromErr(err) + } + + d.SetId(subUser.UserId) + d.Set("role", subUser.Role) + return nil +} + +func resourceSubUserDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + objectStorageService := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + err := objectStorageService.DeleteSubUser(d.Id(), vpcId, s3ServiceDetail.S3ServiceId) + if err != nil { + return diag.FromErr(err) + } + + return nil +} + +func resourceSubUserAccessKeyCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + objectStorageService := NewObjectStorageService(client) + + vpcId := d.Get("vpc_id").(string) + s3ServiceId := d.Get("s3_service_id").(string) + subUserId := d.Get("sub_user_id").(string) + + accessKey := objectStorageService.CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId) + if accessKey == nil { + return diag.Errorf("failed to create sub-user access key") + } + + d.SetId(accessKey.Credential.AccessKey) + d.Set("access_key", accessKey.Credential.AccessKey) + d.Set("secret_key", accessKey.Credential.SecretKey) + + return nil +} + +func resourceSubUserAccessKeyDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + objectStorageService := NewObjectStorageService(client) + + vpcId := d.Get("vpc_id").(string) + s3ServiceId := d.Get("s3_service_id").(string) + subUserId := d.Get("sub_user_id").(string) + accessKeyId := d.Id() + + 
resp := objectStorageService.DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId, accessKeyId) + if !resp.Status { + return diag.Errorf("failed to delete sub-user access key") + } + + return nil +} diff --git a/fptcloud/provider.go b/fptcloud/provider.go index 23571c8..34a970b 100644 --- a/fptcloud/provider.go +++ b/fptcloud/provider.go @@ -2,24 +2,27 @@ package fptcloud import ( "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "log" common "terraform-provider-fptcloud/commons" - "terraform-provider-fptcloud/fptcloud/flavor" - "terraform-provider-fptcloud/fptcloud/floating-ip" - "terraform-provider-fptcloud/fptcloud/floating-ip-association" - "terraform-provider-fptcloud/fptcloud/image" - "terraform-provider-fptcloud/fptcloud/instance" - "terraform-provider-fptcloud/fptcloud/instance-group" - "terraform-provider-fptcloud/fptcloud/instance-group-policy" - "terraform-provider-fptcloud/fptcloud/security-group" - "terraform-provider-fptcloud/fptcloud/security-group-rule" - "terraform-provider-fptcloud/fptcloud/ssh" - "terraform-provider-fptcloud/fptcloud/storage" - "terraform-provider-fptcloud/fptcloud/storage-policy" - "terraform-provider-fptcloud/fptcloud/subnet" - "terraform-provider-fptcloud/fptcloud/vpc" + fptcloud_flavor "terraform-provider-fptcloud/fptcloud/flavor" + fptcloud_floating_ip "terraform-provider-fptcloud/fptcloud/floating-ip" + fptcloud_floating_ip_association "terraform-provider-fptcloud/fptcloud/floating-ip-association" + fptcloud_image "terraform-provider-fptcloud/fptcloud/image" + fptcloud_instance "terraform-provider-fptcloud/fptcloud/instance" + fptcloud_instance_group "terraform-provider-fptcloud/fptcloud/instance-group" + fptcloud_instance_group_policy "terraform-provider-fptcloud/fptcloud/instance-group-policy" + + fptcloud_object_storage "terraform-provider-fptcloud/fptcloud/object-storage" + fptcloud_security_group 
"terraform-provider-fptcloud/fptcloud/security-group" + fptcloud_security_group_rule "terraform-provider-fptcloud/fptcloud/security-group-rule" + fptcloud_ssh "terraform-provider-fptcloud/fptcloud/ssh" + fptcloud_storage "terraform-provider-fptcloud/fptcloud/storage" + fptcloud_storage_policy "terraform-provider-fptcloud/fptcloud/storage-policy" + fptcloud_subnet "terraform-provider-fptcloud/fptcloud/subnet" + fptcloud_vpc "terraform-provider-fptcloud/fptcloud/vpc" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) var ( @@ -60,29 +63,45 @@ func Provider() *schema.Provider { }, }, DataSourcesMap: map[string]*schema.Resource{ - "fptcloud_storage_policy": fptcloud_storage_policy.DataSourceStoragePolicy(), - "fptcloud_storage": fptcloud_storage.DataSourceStorage(), - "fptcloud_ssh_key": fptcloud_ssh.DataSourceSSHKey(), - "fptcloud_vpc": fptcloud_vpc.NewDataSource(), - "fptcloud_flavor": fptcloud_flavor.DataSourceFlavor(), - "fptcloud_image": fptcloud_image.DataSourceImage(), - "fptcloud_security_group": fptcloud_security_group.DataSourceSecurityGroup(), - "fptcloud_instance": fptcloud_instance.DataSourceInstance(), - "fptcloud_instance_group_policy": fptcloud_instance_group_policy.DataSourceInstanceGroupPolicy(), - "fptcloud_instance_group": fptcloud_instance_group.DataSourceInstanceGroup(), - "fptcloud_floating_ip": fptcloud_floating_ip.DataSourceFloatingIp(), - "fptcloud_subnet": fptcloud_subnet.DataSourceSubnet(), + "fptcloud_storage_policy": fptcloud_storage_policy.DataSourceStoragePolicy(), + "fptcloud_storage": fptcloud_storage.DataSourceStorage(), + "fptcloud_ssh_key": fptcloud_ssh.DataSourceSSHKey(), + "fptcloud_vpc": fptcloud_vpc.NewDataSource(), + "fptcloud_flavor": fptcloud_flavor.DataSourceFlavor(), + "fptcloud_image": fptcloud_image.DataSourceImage(), + "fptcloud_security_group": fptcloud_security_group.DataSourceSecurityGroup(), + "fptcloud_instance": 
fptcloud_instance.DataSourceInstance(), + "fptcloud_instance_group_policy": fptcloud_instance_group_policy.DataSourceInstanceGroupPolicy(), + "fptcloud_instance_group": fptcloud_instance_group.DataSourceInstanceGroup(), + "fptcloud_floating_ip": fptcloud_floating_ip.DataSourceFloatingIp(), + "fptcloud_subnet": fptcloud_subnet.DataSourceSubnet(), + "fptcloud_object_storage_access_key": fptcloud_object_storage.DataSourceAccessKey(), + "fptcloud_object_storage_sub_user": fptcloud_object_storage.DataSourceSubUser(), + "fptcloud_object_storage_bucket": fptcloud_object_storage.DataSourceBucket(), + "fptcloud_object_storage_bucket_policy": fptcloud_object_storage.DataSourceBucketPolicy(), + "fptcloud_object_storage_bucket_cors": fptcloud_object_storage.DataSourceBucketCors(), + "fptcloud_object_storage_bucket_versioning": fptcloud_object_storage.DataSourceBucketVersioning(), + "fptcloud_object_storage_lifecycle": fptcloud_object_storage.DataSourceBucketLifecycle(), + "fptcloud_object_storage_static_website": fptcloud_object_storage.DataSourceBucketStaticWebsite(), + "fptcloud_object_storage_sub_user_detail": fptcloud_object_storage.DataSourceSubUserDetail(), }, ResourcesMap: map[string]*schema.Resource{ - "fptcloud_storage": fptcloud_storage.ResourceStorage(), - "fptcloud_ssh_key": fptcloud_ssh.ResourceSSHKey(), - "fptcloud_security_group": fptcloud_security_group.ResourceSecurityGroup(), - "fptcloud_security_group_rule": fptcloud_security_group_rule.ResourceSecurityGroupRule(), - "fptcloud_instance": fptcloud_instance.ResourceInstance(), - "fptcloud_instance_group": fptcloud_instance_group.ResourceInstanceGroup(), - "fptcloud_floating_ip": fptcloud_floating_ip.ResourceFloatingIp(), - "fptcloud_floating_ip_association": fptcloud_floating_ip_association.ResourceFloatingIpAssociation(), - "fptcloud_subnet": fptcloud_subnet.ResourceSubnet(), + "fptcloud_storage": fptcloud_storage.ResourceStorage(), + "fptcloud_ssh_key": fptcloud_ssh.ResourceSSHKey(), + 
"fptcloud_security_group": fptcloud_security_group.ResourceSecurityGroup(), + "fptcloud_security_group_rule": fptcloud_security_group_rule.ResourceSecurityGroupRule(), + "fptcloud_instance": fptcloud_instance.ResourceInstance(), + "fptcloud_instance_group": fptcloud_instance_group.ResourceInstanceGroup(), + "fptcloud_floating_ip": fptcloud_floating_ip.ResourceFloatingIp(), + "fptcloud_floating_ip_association": fptcloud_floating_ip_association.ResourceFloatingIpAssociation(), + "fptcloud_subnet": fptcloud_subnet.ResourceSubnet(), + "fptcloud_object_storage_bucket": fptcloud_object_storage.ResourceBucket(), + "fptcloud_object_storage_sub_user": fptcloud_object_storage.ResourceSubUser(), + "fptcloud_object_storage_access_key": fptcloud_object_storage.ResourceAccessKey(), + "fptcloud_object_storage_bucket_cors": fptcloud_object_storage.ResourceBucketCors(), + "fptcloud_object_storage_bucket_policy": fptcloud_object_storage.ResourceBucketPolicy(), + "fptcloud_object_storage_bucket_versioning": fptcloud_object_storage.ResourceBucketVersioning(), + "fptcloud_object_storage_static_website": fptcloud_object_storage.ResourceBucketStaticWebsite(), }, ConfigureContextFunc: providerConfigureContext, } diff --git a/main.go b/main.go index 88eed1c..6e615a5 100644 --- a/main.go +++ b/main.go @@ -3,12 +3,13 @@ package main import ( "context" "flag" + "log" + "terraform-provider-fptcloud/fptcloud" + "github.com/hashicorp/terraform-plugin-framework/providerserver" "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server" "github.com/hashicorp/terraform-plugin-mux/tf5muxserver" - "log" - "terraform-provider-fptcloud/fptcloud" ) //go:generate go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs generate -provider-name terraform-provider-fptcloud @@ -24,11 +25,13 @@ func main() { "set to true to run the provider with support for debuggers", ) flag.Parse() + log.Printf("[DEBUG] Configuring provider...") providers := 
[]func() tfprotov5.ProviderServer{
 		providerserver.NewProtocol5(fptcloud.NewXplatProvider("dev")()),
 		fptcloud.Provider().GRPCProvider,
 	}
+	// %v verb added: log.Printf with a trailing argument and no verb is a
+	// `go vet` printf error and renders as "%!(EXTRA ...)" at runtime.
+	log.Printf("[DEBUG] providers: %v", providers)
 
 	muxServer, err := tf5muxserver.NewMuxServer(ctx, providers...)
 
 	if err != nil {

From d4c536eb47e9cd9ddc712e1751741c8901c27483 Mon Sep 17 00:00:00 2001
From: hoanglm
Date: Tue, 12 Nov 2024 01:51:25 +0700
Subject: [PATCH 2/8] [Object Storage] update: fix sub user, split sub user
 keys into another file

---
 commons/api_path.go                           | 15 ++-
 commons/client.go                             |  8 +-
 .../datasource_object_storage.go              | 84 +++++------
 .../datasource_object_storage_access_key.go   |  4 +
 .../datasource_object_storage_bucket.go       |  4 +
 .../datasource_object_storage_bucket_cors.go  |  4 +
 ...datasource_object_storage_bucket_policy.go |  3 +
 .../datasource_object_storage_lifecycle.go    |  7 +-
 ...atasource_object_storage_static_website.go | 12 +-
 .../datasource_object_storage_sub_user.go     | 43 +++++--
 ...tasource_object_storage_sub_user_detail.go | 26 ++--
 .../datasource_object_storage_versioning.go   | 21 +++-
 .../object-storage/resource_access_key.go     | 78 +++++++++---
 fptcloud/object-storage/resource_bucket.go    | 30 +++--
 .../object-storage/resource_bucket_acl.go     | 113 ++++++++++++++++++
 .../object-storage/resource_bucket_cors.go    |  7 ++
 .../object-storage/resource_bucket_policy.go  | 11 +-
 .../resource_bucket_static_website.go         | 23 ++--
 .../resource_bucket_versioning.go             | 34 +++---
 fptcloud/object-storage/resource_sub_user.go  | 65 +++-------
 .../object-storage/resource_sub_user_keys.go  | 70 +++++++++++
 fptcloud/provider.go                          | 34 +++---
 22 files changed, 494 insertions(+), 202 deletions(-)
 create mode 100644 fptcloud/object-storage/resource_bucket_acl.go
 create mode 100644 fptcloud/object-storage/resource_sub_user_keys.go

diff --git a/commons/api_path.go b/commons/api_path.go
index 7d66de6..a5b74f3 100644
--- a/commons/api_path.go
+++ b/commons/api_path.go
@@ -68,7 +68,7 @@ var ApiPath = struct {
 	// Bucket
 	ListBuckets  func(vpcId, 
s3ServiceId string, page, pageSize int) string CreateBucket func(vpcId, s3ServiceId string) string - DeleteBucket func(vpcId, s3ServiceId, bucketName string) string + DeleteBucket func(vpcId, s3ServiceId string) string // Bucket Policy GetBucketPolicy func(vpcId, s3ServiceId, bucketName string) string PutBucketPolicy func(vpcId, s3ServiceId, bucketName string) string @@ -268,7 +268,6 @@ var ApiPath = struct { // Object Storage // Common CheckS3ServiceEnable: func(vpcId string) string { - fmt.Println("vpcId: ", vpcId) return fmt.Sprintf("/v1/vmware/vpc/%s/s3/check-service-enabled?check_unlimited=undefined", vpcId) }, @@ -280,8 +279,8 @@ var ApiPath = struct { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/buckets/create", vpcId, s3ServiceId) }, - DeleteBucket: func(vpcId, s3ServiceId, bucketName string) string { - return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/buckets/%s/delete", vpcId, s3ServiceId, bucketName) + DeleteBucket: func(vpcId, s3ServiceId string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/buckets/delete", vpcId, s3ServiceId) }, // Bucket Versioning @@ -348,15 +347,16 @@ var ApiPath = struct { DeleteSubUser: func(vpcId, s3ServiceId, subUserId string) string { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/%s/delete", vpcId, s3ServiceId, subUserId) }, + DetailSubUser: func(vpcId, s3ServiceId, subUserId string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/%s/detail", vpcId, s3ServiceId, subUserId) + }, + // Sub-user Access Key CreateSubUserAccessKey: func(vpcId, s3ServiceId, subUserId string) string { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/%s/credentials/create", vpcId, s3ServiceId, subUserId) }, DeleteSubUserAccessKey: func(vpcId, s3ServiceId, subUserId, accessKeyId string) string { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/%s/credentials/%s/delete", vpcId, s3ServiceId, subUserId, accessKeyId) }, - DetailSubUser: func(vpcId, s3ServiceId, subUserId string) string { - return 
fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/%s/detail", vpcId, s3ServiceId, subUserId) - }, // Access Key ListAccessKeys: func(vpcId, s3ServiceId string) string { @@ -365,7 +365,6 @@ var ApiPath = struct { CreateAccessKey: func(vpcId, s3ServiceId string) string { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/user/credentials", vpcId, s3ServiceId) }, - // https://console-api.fptcloud.com/api/v1/vmware/vpc/1dce0aa0-a78d-4e19-89a3-d688bcff7f1b/s3/d8c82109-3d17-4ac2-8b21-5fedb2d81c54/user/credentials/delete DeleteAccessKey: func(vpcId, s3ServiceId string) string { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/user/credentials/delete", vpcId, s3ServiceId) }, diff --git a/commons/client.go b/commons/client.go index 4eee795..415e88a 100644 --- a/commons/client.go +++ b/commons/client.go @@ -112,9 +112,6 @@ func (c *Client) SendRequest(req *http.Request) ([]byte, error) { body, err := io.ReadAll(resp.Body) c.LastJSONResponse = string(body) - fmt.Println("resp.StatusCode: ", resp.StatusCode) - fmt.Println("resp.BODY: ", string(body)) - fmt.Println("resp.URL: ", resp.Request.URL.String()) if resp.StatusCode >= 300 { return nil, HTTPError{Code: resp.StatusCode, Status: resp.Status, Reason: string(body)} @@ -178,7 +175,10 @@ func (c *Client) SendDeleteRequestWithBody(requestURL string, params interface{} u := c.PrepareClientURL(requestURL) // we create a new buffer and encode everything to json to send it in the request - jsonValue, _ := json.Marshal(params) + jsonValue, err := json.Marshal(params) + if err != nil { + return nil, err + } req, err := http.NewRequest("DELETE", u.String(), bytes.NewBuffer(jsonValue)) if err != nil { diff --git a/fptcloud/object-storage/datasource_object_storage.go b/fptcloud/object-storage/datasource_object_storage.go index 37afbcb..f813054 100644 --- a/fptcloud/object-storage/datasource_object_storage.go +++ b/fptcloud/object-storage/datasource_object_storage.go @@ -34,9 +34,30 @@ type CreateAccessKeyResponse struct { CreatedDate interface{} 
`json:"createdDate"` } `json:"credential,omitempty"` } +type SubUserCreateKeyResponse struct { + Status bool `json:"status"` + Credential struct { + AccessKey string `json:"accessKey"` + SecretKey string `json:"secretKey"` + Active interface{} `json:"active"` + CreatedDate interface{} `json:"createdDate"` + } `json:"credential"` +} + type SubUser struct { Role string `json:"role"` - UserId string `json:"user_id,omitempty"` + UserId string `json:"user_id"` +} +type SubUserListResponse struct { + SubUsers []struct { + UserID string `json:"user_id"` + Arn string `json:"arn"` + Active bool `json:"active"` + Role string `json:"role"` + CreatedAt interface{} `json:"created_at"` + AccessKeys interface{} `json:"access_keys"` + } `json:"sub_users"` + Total int `json:"total"` } type CommonResponse struct { Status bool `json:"status"` @@ -199,7 +220,7 @@ type ObjectStorageService interface { // Bucket ListBuckets(vpcId, s3ServiceId string, page, pageSize int) ListBucketResponse CreateBucket(req BucketRequest, vpcId, s3ServiceId string) CommonResponse - DeleteBucket(vpcId, s3ServiceId, bucketName string) error + DeleteBucket(vpcId, s3ServiceId, bucketName string) CommonResponse // Access key ListAccessKeys(vpcId, s3ServiceId string) (AccessKey, error) @@ -207,11 +228,11 @@ type ObjectStorageService interface { CreateAccessKey(vpcId, s3ServiceId string) *CreateAccessKeyResponse // Sub user - CreateSubUser(req SubUser, vpcId, s3ServiceId string) (*SubUser, error) + CreateSubUser(req SubUser, vpcId, s3ServiceId string) *CommonResponse DeleteSubUser(vpcId, s3ServiceId, subUserId string) error - ListSubUsers(vpcId, s3ServiceId string) ([]SubUser, error) + ListSubUsers(vpcId, s3ServiceId string) ([]SubUserListResponse, error) DetailSubUser(vpcId, s3ServiceId, subUserId string) *DetailSubUser - CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId string) *CreateAccessKeyResponse + CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId string) *SubUserCreateKeyResponse 
DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId, accessKeyId string) CommonResponse // bucket configuration @@ -229,7 +250,7 @@ type ObjectStorageService interface { // Acl configuration PutBucketAcl(vpcId, s3ServiceId, bucketName string, acl BucketAclRequest) PutBucketAclResponse - GetBucketAcl(vpcId, s3ServiceId, bucketName string) (*BucketAclResponse, error) + GetBucketAcl(vpcId, s3ServiceId, bucketName string) *BucketAclResponse // Static website configuration PutBucketWebsite(vpcId, s3ServiceId, bucketName string, website BucketWebsiteRequest) CommonResponse @@ -269,7 +290,6 @@ func (s *ObjectStorageServiceImpl) CheckServiceEnable(vpcId string) S3ServiceEna func (s *ObjectStorageServiceImpl) CreateBucket(req BucketRequest, vpcId, s3ServiceId string) CommonResponse { apiPath := common.ApiPath.CreateBucket(vpcId, s3ServiceId) - fmt.Println("apiPath", apiPath) resp, err := s.client.SendPostRequest(apiPath, req) if err != nil { return CommonResponse{Status: false, Message: err.Error()} @@ -285,26 +305,25 @@ func (s *ObjectStorageServiceImpl) CreateBucket(req BucketRequest, vpcId, s3Serv } // CreateSubUser creates a new sub-user -func (s *ObjectStorageServiceImpl) CreateSubUser(req SubUser, vpcId, s3ServiceId string) (*SubUser, error) { +func (s *ObjectStorageServiceImpl) CreateSubUser(req SubUser, vpcId, s3ServiceId string) *CommonResponse { apiPath := common.ApiPath.CreateSubUser(vpcId, s3ServiceId) resp, err := s.client.SendPostRequest(apiPath, req) if err != nil { - return nil, fmt.Errorf("failed to create sub-user: %v", err) + return &CommonResponse{Status: false, Message: err.Error()} } - var subUser SubUser + var subUser CommonResponse err = json.Unmarshal(resp, &subUser) if err != nil { - return nil, fmt.Errorf("failed to unmarshal sub-user response: %v", err) + return &CommonResponse{Status: false, Message: err.Error()} } - return &subUser, nil + return &CommonResponse{Status: subUser.Status, Message: "Sub-user created successfully"} } func (s 
*ObjectStorageServiceImpl) CreateAccessKey(vpcId, s3ServiceId string) *CreateAccessKeyResponse { apiPath := common.ApiPath.CreateAccessKey(vpcId, s3ServiceId) resp, err := s.client.SendPostRequest(apiPath, nil) - fmt.Println("resp", resp) if err != nil { return &CreateAccessKeyResponse{Status: false, Message: err.Error()} } @@ -334,14 +353,14 @@ func (s *ObjectStorageServiceImpl) ListBuckets(vpcId, s3ServiceId string, page, return buckets } -func (s *ObjectStorageServiceImpl) ListSubUsers(vpcId, s3ServiceId string) ([]SubUser, error) { +func (s *ObjectStorageServiceImpl) ListSubUsers(vpcId, s3ServiceId string) ([]SubUserListResponse, error) { apiPath := common.ApiPath.ListSubUsers(vpcId, s3ServiceId) resp, err := s.client.SendGetRequest(apiPath) if err != nil { return nil, fmt.Errorf("failed to list sub-users: %v", err) } - var subUsers []SubUser + var subUsers []SubUserListResponse err = json.Unmarshal(resp, &subUsers) if err != nil { return nil, fmt.Errorf("failed to unmarshal sub-user list response: %v", err) @@ -366,23 +385,20 @@ func (s *ObjectStorageServiceImpl) ListAccessKeys(vpcId, s3ServiceId string) (Ac return accessKeys, nil } -func (s *ObjectStorageServiceImpl) DeleteBucket(vpcId, s3ServiceId, bucketName string) error { - apiPath := common.ApiPath.DeleteBucket(vpcId, s3ServiceId, bucketName) - if _, err := s.client.SendDeleteRequest(apiPath); err != nil { - return fmt.Errorf("failed to delete bucket: %v", err) +func (s *ObjectStorageServiceImpl) DeleteBucket(vpcId, s3ServiceId, bucketName string) CommonResponse { + apiPath := common.ApiPath.DeleteBucket(vpcId, s3ServiceId) + payload := map[string]string{"name": bucketName} + + if _, err := s.client.SendDeleteRequestWithBody(apiPath, payload); err != nil { + + return CommonResponse{Status: false} } - return nil + return CommonResponse{Status: true, Message: "Bucket deleted successfully"} } func (s *ObjectStorageServiceImpl) DeleteAccessKey(vpcId, s3ServiceId, accessKeyId string) error { apiPath := 
common.ApiPath.DeleteAccessKey(vpcId, s3ServiceId) body := map[string]string{"accessKey": accessKeyId} - fmt.Println("-----------------") - fmt.Printf("[INFO] Deleting access key: %s\n", accessKeyId) - fmt.Printf("[INFO] vpc_id: %s\n", vpcId) - fmt.Printf("[INFO] body: %s\n", body) - fmt.Printf("[INFO] apiPath: %s\n", apiPath) - fmt.Println("-----------------") if _, err := s.client.SendDeleteRequestWithBody(apiPath, body); err != nil { return fmt.Errorf("failed to delete access key: %v", err) } @@ -508,18 +524,18 @@ func (s *ObjectStorageServiceImpl) PutBucketAcl(vpcId, s3ServiceId, bucketName s return putBucketAclResponse } -func (s *ObjectStorageServiceImpl) GetBucketAcl(vpcId, s3ServiceId, bucketName string) (*BucketAclResponse, error) { +func (s *ObjectStorageServiceImpl) GetBucketAcl(vpcId, s3ServiceId, bucketName string) *BucketAclResponse { apiPath := common.ApiPath.GetBucketAcl(vpcId, s3ServiceId, bucketName) resp, err := s.client.SendGetRequest(apiPath) if err != nil { - return nil, fmt.Errorf("failed to get bucket ACL: %v", err) + return &BucketAclResponse{Status: false} } var acl BucketAclResponse if err := json.Unmarshal(resp, &acl); err != nil { - return nil, fmt.Errorf("failed to unmarshal bucket ACL: %v", err) + return &BucketAclResponse{Status: false} } - return &acl, nil + return &acl } func (s *ObjectStorageServiceImpl) DeleteSubUser(vpcId, s3ServiceId, subUserId string) error { @@ -572,18 +588,18 @@ func (s *ObjectStorageServiceImpl) DeleteBucketLifecycle(vpcId, s3ServiceId, buc return &bucketLifecycle, nil } -func (s *ObjectStorageServiceImpl) CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId string) *CreateAccessKeyResponse { +func (s *ObjectStorageServiceImpl) CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId string) *SubUserCreateKeyResponse { apiPath := common.ApiPath.CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId) resp, err := s.client.SendPostRequest(apiPath, nil) if err != nil { return nil } - var accessKey 
CreateAccessKeyResponse - if err := json.Unmarshal(resp, &accessKey); err != nil { + var subUserKeys SubUserCreateKeyResponse + if err := json.Unmarshal(resp, &subUserKeys); err != nil { return nil } - return &accessKey + return &subUserKeys } func (s *ObjectStorageServiceImpl) DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId, accessKeyId string) CommonResponse { diff --git a/fptcloud/object-storage/datasource_object_storage_access_key.go b/fptcloud/object-storage/datasource_object_storage_access_key.go index ecb442a..275f01b 100644 --- a/fptcloud/object-storage/datasource_object_storage_access_key.go +++ b/fptcloud/object-storage/datasource_object_storage_access_key.go @@ -2,6 +2,7 @@ package fptcloud_object_storage import ( "context" + "fmt" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -56,6 +57,9 @@ func dataSourceAccessKeyRead(ctx context.Context, d *schema.ResourceData, m inte vpcId := d.Get("vpc_id").(string) regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } _, err := service.ListAccessKeys(vpcId, s3ServiceDetail.S3ServiceId) if err != nil { return diag.FromErr(err) diff --git a/fptcloud/object-storage/datasource_object_storage_bucket.go b/fptcloud/object-storage/datasource_object_storage_bucket.go index 4f39169..420827d 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket.go @@ -2,6 +2,7 @@ package fptcloud_object_storage import ( "context" + "fmt" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -46,6 +47,9 @@ func dataSourceBucketRead(ctx context.Context, d *schema.ResourceData, m interfa service := NewObjectStorageService(client) vpcId := d.Get("vpc_id").(string) 
s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } page := 1 if d.Get("page") != nil { page = d.Get("page").(int) diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_cors.go b/fptcloud/object-storage/datasource_object_storage_bucket_cors.go index e94916c..6f5460a 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket_cors.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_cors.go @@ -2,6 +2,7 @@ package fptcloud_object_storage import ( "context" + "fmt" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -74,6 +75,9 @@ func dataSourceBucketCorsRead(ctx context.Context, d *schema.ResourceData, m int service := NewObjectStorageService(client) vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } bucketName := d.Get("bucket_name").(string) corsRule, err := service.GetBucketCors(vpcId, s3ServiceDetail.S3ServiceId, bucketName) diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_policy.go b/fptcloud/object-storage/datasource_object_storage_bucket_policy.go index 15c323a..6200bdc 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket_policy.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_policy.go @@ -39,6 +39,9 @@ func dataSourceBucketPolicyRead(ctx context.Context, d *schema.ResourceData, m i bucketName := d.Get("bucket_name").(string) vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not 
enabled", d.Get("region_name").(string))) + } policyResponse := service.GetBucketPolicy(vpcId, bucketName, s3ServiceDetail.S3ServiceId) if !policyResponse.Status { return diag.Errorf("failed to get bucket policy for bucket %s", bucketName) diff --git a/fptcloud/object-storage/datasource_object_storage_lifecycle.go b/fptcloud/object-storage/datasource_object_storage_lifecycle.go index 7f3544d..34a883c 100644 --- a/fptcloud/object-storage/datasource_object_storage_lifecycle.go +++ b/fptcloud/object-storage/datasource_object_storage_lifecycle.go @@ -30,9 +30,7 @@ func DataSourceBucketLifecycle() *schema.Resource { }, "region_name": { Type: schema.TypeString, - Required: false, - Default: "HCM-02", - Optional: true, + Required: true, Description: "The region name of the bucket", }, "page_size": { @@ -58,6 +56,9 @@ func dataSourceBucketLifecycle(ctx context.Context, d *schema.ResourceData, m in bucketName := d.Get("bucket_name").(string) vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } page := d.Get("page").(string) pageSize := d.Get("page_size").(string) diff --git a/fptcloud/object-storage/datasource_object_storage_static_website.go b/fptcloud/object-storage/datasource_object_storage_static_website.go index 00c3692..d227fa5 100644 --- a/fptcloud/object-storage/datasource_object_storage_static_website.go +++ b/fptcloud/object-storage/datasource_object_storage_static_website.go @@ -2,6 +2,7 @@ package fptcloud_object_storage import ( "context" + "fmt" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -24,9 +25,7 @@ func DataSourceBucketStaticWebsite() *schema.Resource { }, "region_name": { Type: schema.TypeString, - Required: false, - Default: "HCM-02", - Optional: true, + Required: true, Description: "The region 
name of the bucket", }, "status": { @@ -85,14 +84,15 @@ func dataSourceBucketStaticWebsite(ctx context.Context, d *schema.ResourceData, bucketName := d.Get("bucket_name").(string) vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } staticWebsiteResponse := service.GetBucketWebsite(vpcId, bucketName, s3ServiceDetail.S3ServiceId) if !staticWebsiteResponse.Status { - return diag.Errorf("failed to get bucket policy for bucket %s", bucketName) + return diag.Errorf("failed to get bucket static website config for bucket %s", bucketName) } - d.SetId(bucketName) - // Set the computed values if err := d.Set("status", staticWebsiteResponse.Status); err != nil { return diag.FromErr(err) diff --git a/fptcloud/object-storage/datasource_object_storage_sub_user.go b/fptcloud/object-storage/datasource_object_storage_sub_user.go index f59f439..88560ec 100644 --- a/fptcloud/object-storage/datasource_object_storage_sub_user.go +++ b/fptcloud/object-storage/datasource_object_storage_sub_user.go @@ -2,6 +2,7 @@ package fptcloud_object_storage import ( "context" + "fmt" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -16,7 +17,7 @@ func DataSourceSubUser() *schema.Resource { "role": { Type: schema.TypeString, Required: true, - Description: "Role of the sub-user", + Description: "Role of the sub-user, should be one of the following: SubUserNone, SubUserRead, SubUserReadWrite, SubUserWrite, SubUserFull", }, "user_id": { Type: schema.TypeString, @@ -29,6 +30,28 @@ func DataSourceSubUser() *schema.Resource { Required: true, Description: "The VPC ID", }, + "region_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The region name of sub-user", + }, + "list_sub_user": { + Type: schema.TypeList, + 
Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "user_id": { + Type: schema.TypeString, + Computed: true, + }, + "role": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, }, } } @@ -38,6 +61,9 @@ func dataSourceSubUserRead(ctx context.Context, d *schema.ResourceData, m interf service := NewObjectStorageService(client) vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } subUsers, err := service.ListSubUsers(vpcId, s3ServiceDetail.S3ServiceId) if err != nil { @@ -45,13 +71,14 @@ func dataSourceSubUserRead(ctx context.Context, d *schema.ResourceData, m interf } role := d.Get("role").(string) - for _, user := range subUsers { - if user.Role == role { - d.SetId(user.UserId) - d.Set("user_id", user.UserId) - return nil - } - } + fmt.Println("subUsers: ", subUsers) + // for _, user := range subUsers { + // if user.Role == role { + // d.SetId(user.UserId) + // d.Set("user_id", user.UserId) + // return nil + // } + // } return diag.Errorf("sub-user with role %s not found", role) } diff --git a/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go b/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go index ab4888e..9a223f2 100644 --- a/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go +++ b/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go @@ -2,6 +2,8 @@ package fptcloud_object_storage import ( "context" + "fmt" + "reflect" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -17,16 +19,6 @@ func DataSourceSubUserDetail() *schema.Resource { Required: true, Description: "The VPC ID", }, - "s3_service_id": { - Type: schema.TypeString, - Required: true, - Description: "The S3 service ID", - }, - 
"sub_user_id": { - Type: schema.TypeString, - Required: true, - Description: "The sub-user ID", - }, "user_id": { Type: schema.TypeString, Computed: true, @@ -66,13 +58,16 @@ func DataSourceSubUserDetail() *schema.Resource { func dataSourceSubUserDetailRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { client := m.(*common.Client) - service := NewObjectStorageService(client) + objectStorageService := NewObjectStorageService(client) vpcId := d.Get("vpc_id").(string) - s3ServiceId := d.Get("s3_service_id").(string) - subUserId := d.Get("sub_user_id").(string) + s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } + subUserId := d.Get("user_id").(string) - subUser := service.DetailSubUser(vpcId, s3ServiceId, subUserId) + subUser := objectStorageService.DetailSubUser(vpcId, s3ServiceDetail.S3ServiceId, subUserId) if subUser == nil { return diag.Errorf("sub-user with ID %s not found", subUserId) } @@ -82,7 +77,8 @@ func dataSourceSubUserDetailRead(ctx context.Context, d *schema.ResourceData, m if subUser.Arn != nil { d.Set("arn", subUser.Arn) } - d.Set("active", subUser.Active) + fmt.Println("subUser active is: ", subUser.Active) + fmt.Println("reflect subUser active is: ", reflect.TypeOf(subUser.Active)) d.Set("role", subUser.Role) if subUser.CreatedAt != nil { d.Set("created_at", subUser.CreatedAt) diff --git a/fptcloud/object-storage/datasource_object_storage_versioning.go b/fptcloud/object-storage/datasource_object_storage_versioning.go index d45b2e8..1f73478 100644 --- a/fptcloud/object-storage/datasource_object_storage_versioning.go +++ b/fptcloud/object-storage/datasource_object_storage_versioning.go @@ -2,6 +2,7 @@ package fptcloud_object_storage import ( "context" + "fmt" common "terraform-provider-fptcloud/commons" 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -23,10 +24,17 @@ func DataSourceBucketVersioning() *schema.Resource { Required: true, Description: "The VPC ID", }, - "enabled": { - Type: schema.TypeBool, - Computed: true, - Description: "Enable or suspend versioning", + "versioning_status": { + Type: schema.TypeString, + Required: true, + Description: "Status of the versioning, must be Enabled or Suspended", + ForceNew: true, // Marking this field as ForceNew to ensure that the resource is recreated when the value is changed + }, + "region_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The region name to create the access key", }, }, } @@ -38,6 +46,9 @@ func dataSourceBucketVersioningRead(ctx context.Context, d *schema.ResourceData, vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } bucketName := d.Get("bucket_name").(string) versioning := service.GetBucketVersioning(vpcId, bucketName, s3ServiceDetail.S3ServiceId) @@ -46,7 +57,7 @@ func dataSourceBucketVersioningRead(ctx context.Context, d *schema.ResourceData, } d.SetId(bucketName) - d.Set("enabled", versioning.Status == "Enabled") + d.Set("versioning_status", versioning.Status) return nil } diff --git a/fptcloud/object-storage/resource_access_key.go b/fptcloud/object-storage/resource_access_key.go index fc3586a..eb5ab4d 100644 --- a/fptcloud/object-storage/resource_access_key.go +++ b/fptcloud/object-storage/resource_access_key.go @@ -3,6 +3,7 @@ package fptcloud_object_storage import ( "context" "fmt" + "log" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -24,8 +25,8 @@ func ResourceAccessKey() *schema.Resource { "access_key_id": { Type: schema.TypeString, Required: false, - Computed: true, ForceNew: true, 
+ Optional: true, Description: "The access key ID", }, "secret_access_key": { @@ -40,10 +41,17 @@ func ResourceAccessKey() *schema.Resource { ForceNew: true, Description: "The region name to create the access key", }, - "create_access_key_response": { + "status": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "The status after creating the access key", + }, + "message": { Type: schema.TypeString, - Computed: true, - Description: "The create access key response", + Optional: true, + ForceNew: true, + Description: "The message after creating the access key", }, }, Importer: &schema.ResourceImporter{ @@ -57,23 +65,26 @@ func resourceAccessKeyCreate(ctx context.Context, d *schema.ResourceData, m inte vpcId := d.Get("vpc_id").(string) regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + } + resp := service.CreateAccessKey(vpcId, s3ServiceDetail.S3ServiceId) - var createAccessKeyResponse CreateAccessKeyResponse - if resp.Credential.AccessKey != "" && resp.Credential.SecretKey != "" { - createAccessKeyResponse.Credential.AccessKey = resp.Credential.AccessKey - createAccessKeyResponse.Credential.SecretKey = resp.Credential.SecretKey + + if !resp.Status { + return diag.Errorf(resp.Message) } - if resp.Message != "" { - createAccessKeyResponse.Message = resp.Message + + if resp.Credential.AccessKey != "" { + d.SetId(resp.Credential.AccessKey) + d.Set("access_key_id", resp.Credential.AccessKey) + d.Set("secret_access_key", resp.Credential.SecretKey) } - createAccessKeyResponse.Status = resp.Status - fmt.Println("Create access key response: ", createAccessKeyResponse) - p := fmt.Sprintf("%v", createAccessKeyResponse) - d.Set("access_key_id", createAccessKeyResponse.Credential.AccessKey) - d.Set("secret_access_key", createAccessKeyResponse.Credential.SecretKey) - 
d.SetId(resp.Credential.AccessKey) - d.Set("create_access_key_response", p) + d.Set("status", resp.Status) + if resp.Message != "" { + d.Set("message", resp.Message) + } return nil } @@ -107,9 +118,38 @@ func resourceAccessKeyDelete(ctx context.Context, d *schema.ResourceData, m inte service := NewObjectStorageService(client) vpcId := d.Get("vpc_id").(string) regionName := d.Get("region_name").(string) - accessKeyId := d.Get("access_key_id").(string) + accessKeyId := d.Id() + if accessKeyId == "" { + // If the access key ID is not set, try to get it from the data source + accessKeyId = d.Get("access_key_id").(string) + } + + log.Printf("[DEBUG] Starting deletion of access key. VPC ID: %s, Region: %s, Access Key ID: %s", + vpcId, regionName, accessKeyId) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + log.Printf("[ERROR] Region %s is not enabled for VPC %s", regionName, vpcId) + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + } + + log.Printf("[DEBUG] Found S3 service ID: %s", s3ServiceDetail.S3ServiceId) + + if accessKeyId == "" { + log.Printf("[ERROR] access_key_id is empty") + return diag.Errorf("access_key_id is required for deletion") + } + + log.Printf("[INFO] Attempting to delete access key %s for VPC %s in region %s", + accessKeyId, vpcId, regionName) + + err := service.DeleteAccessKey(vpcId, s3ServiceDetail.S3ServiceId, accessKeyId) + if err != nil { + log.Printf("[ERROR] Failed to delete access key %s: %v", accessKeyId, err) + return diag.FromErr(err) + } - service.DeleteAccessKey(vpcId, s3ServiceDetail.S3ServiceId, accessKeyId) + log.Printf("[INFO] Successfully deleted access key %s", accessKeyId) + d.SetId("") return nil } diff --git a/fptcloud/object-storage/resource_bucket.go b/fptcloud/object-storage/resource_bucket.go index 295b542..3527e6e 100644 --- a/fptcloud/object-storage/resource_bucket.go +++ b/fptcloud/object-storage/resource_bucket.go @@ -35,11 +35,8 
@@ func ResourceBucket() *schema.Resource { Description: "The versioning state of the bucket. Accepted values are Enabled or Suspended, default was not set.", }, "region_name": { - Type: schema.TypeString, - Required: false, - // Default: "HCM-02" if not provided - Default: "HCM-02", - Optional: true, + Type: schema.TypeString, + Required: true, ForceNew: true, Description: "The region name that's are the same with the region name in the S3 service.", }, @@ -54,6 +51,12 @@ func ResourceBucket() *schema.Resource { Required: true, ForceNew: true, }, + "status": { + Type: schema.TypeBool, + Computed: true, + ForceNew: true, + Description: "The status after create or delete the bucket", + }, }, } } @@ -73,6 +76,9 @@ func getServiceEnableRegion(objectStorageService ObjectStorageService, vpcId, re break } } + if s3ServiceDetail.S3ServiceId == "" { + return S3ServiceDetail{} + } return s3ServiceDetail } @@ -102,6 +108,9 @@ func resourceBucketRead(_ context.Context, d *schema.ResourceData, m interface{} objectStorageService := NewObjectStorageService(client) vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } bucket := objectStorageService.ListBuckets(vpcId, s3ServiceDetail.S3ServiceId, 1, 99999) if bucket.Total == 0 { @@ -109,7 +118,6 @@ func resourceBucketRead(_ context.Context, d *schema.ResourceData, m interface{} } for _, b := range bucket.Buckets { if b.Name == d.Get("name").(string) { - d.SetId(b.Name) d.Set("name", b.Name) return nil } @@ -122,10 +130,14 @@ func resourceBucketDelete(ctx context.Context, d *schema.ResourceData, m interfa vpcId := d.Get("vpc_id").(string) bucketName := d.Get("name").(string) s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { 
+ return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } - err := objectStorageService.DeleteBucket(bucketName, vpcId, s3ServiceDetail.S3ServiceId) - if err != nil { - return diag.FromErr(err) + satus := objectStorageService.DeleteBucket(vpcId, s3ServiceDetail.S3ServiceId, bucketName) + if !satus.Status { + return diag.Errorf("failed to delete bucket %s", bucketName) } + return nil } diff --git a/fptcloud/object-storage/resource_bucket_acl.go b/fptcloud/object-storage/resource_bucket_acl.go new file mode 100644 index 0000000..e6f3cc4 --- /dev/null +++ b/fptcloud/object-storage/resource_bucket_acl.go @@ -0,0 +1,113 @@ +package fptcloud_object_storage + +import ( + "context" + "fmt" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ResourceBucketAcl() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceBucketAclCreate, + ReadContext: resourceBucketAclRead, + DeleteContext: resourceBucketAclDelete, + UpdateContext: nil, + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC ID", + }, + "bucket_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the bucket to config the ACL", + }, + "region_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The region name where the bucket is located, e.g., HCM-02, can be retrieved when creating the bucket", + }, + "canned_acl": { + Type: schema.TypeString, + Required: true, + Description: "The Access Control List (ACL) status of the bucket which can be one of the following values: private, public-read, default is private", + ForceNew: true, + }, + "apply_objects": { + Type: schema.TypeBool, + Default: false, + ForceNew: true, + Optional: true, + Description: "Apply the ACL to all objects in 
the bucket", + }, + "status": { + Type: schema.TypeBool, + Computed: true, + ForceNew: true, + Description: "The status after configuring the bucket ACL", + }, + }, + } +} + +func resourceBucketAclCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + bucketName := d.Get("bucket_name").(string) + regionName := d.Get("region_name").(string) + cannedAcl := d.Get("canned_acl").(string) + applyObjects := d.Get("apply_objects").(bool) + fmt.Println("applyObjects", applyObjects) + if cannedAcl != "private" && cannedAcl != "public-read" { + return diag.Errorf("canned_acl must be either private or public-read, got %s", cannedAcl) + } + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + } + var bucketAclRequest BucketAclRequest + bucketAclRequest.CannedAcl = cannedAcl + bucketAclRequest.ApplyObjects = applyObjects + + r := service.PutBucketAcl(vpcId, s3ServiceDetail.S3ServiceId, bucketName, bucketAclRequest) + if !r.Status { + d.Set("status", false) + return diag.Errorf("failed to create bucket ACL for bucket %s", bucketName) + } + d.Set("status", true) + return resourceBucketAclRead(ctx, d, m) +} + +func resourceBucketAclRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + bucketName := d.Get("bucket_name").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + } + r := service.GetBucketAcl(vpcId, s3ServiceDetail.S3ServiceId, bucketName) + if !r.Status { + return 
diag.Errorf("failed to get bucket ACL for bucket %s", bucketName) + } + d.Set("canned_acl", r.CannedACL) + d.Set("status", r.Status) + return nil +} + +func resourceBucketAclDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + // Remove the resource from the state + d.SetId("") + fmt.Println("Delete operation is not supported for bucket ACLs. This is a no-op.") + return nil +} diff --git a/fptcloud/object-storage/resource_bucket_cors.go b/fptcloud/object-storage/resource_bucket_cors.go index 51124d3..7b00a03 100644 --- a/fptcloud/object-storage/resource_bucket_cors.go +++ b/fptcloud/object-storage/resource_bucket_cors.go @@ -2,6 +2,7 @@ package fptcloud_object_storage import ( "context" + "fmt" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -76,6 +77,9 @@ func resourceBucketCorsCreate(ctx context.Context, d *schema.ResourceData, m int service := NewObjectStorageService(client) vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } bucketName := d.Get("bucket_name").(string) corsRule := d.Get("cors_rule").([]interface{}) @@ -114,6 +118,9 @@ func resourceBucketCorsUpdate(ctx context.Context, d *schema.ResourceData, m int bucketName := d.Get("bucket_name").(string) vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } corsRule := d.Get("cors_rule").([]interface{}) diff --git a/fptcloud/object-storage/resource_bucket_policy.go b/fptcloud/object-storage/resource_bucket_policy.go index e02b48e..0755538 100644 --- a/fptcloud/object-storage/resource_bucket_policy.go +++ 
b/fptcloud/object-storage/resource_bucket_policy.go @@ -2,6 +2,7 @@ package fptcloud_object_storage import ( "context" + "fmt" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -28,9 +29,7 @@ func ResourceBucketPolicy() *schema.Resource { }, "region_name": { Type: schema.TypeString, - Required: false, - Default: "HCM-02", - Optional: true, + Required: true, ForceNew: true, Description: "The region name of the bucket", }, @@ -53,6 +52,9 @@ func resourceBucketPolicyCreate(ctx context.Context, d *schema.ResourceData, m i vpcId := d.Get("vpc_id").(string) regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } resp := service.PutBucketPolicy(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketPolicyRequest{ Policy: policy, @@ -78,6 +80,9 @@ func resourceBucketPolicyDelete(ctx context.Context, d *schema.ResourceData, m i vpcId := d.Get("vpc_id").(string) regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } resp := service.PutBucketPolicy(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketPolicyRequest{ Policy: "", diff --git a/fptcloud/object-storage/resource_bucket_static_website.go b/fptcloud/object-storage/resource_bucket_static_website.go index 56fafe6..c64953a 100644 --- a/fptcloud/object-storage/resource_bucket_static_website.go +++ b/fptcloud/object-storage/resource_bucket_static_website.go @@ -14,7 +14,6 @@ func ResourceBucketStaticWebsite() *schema.Resource { CreateContext: resourceBucketStaticWebsiteCreate, ReadContext: dataSourceBucketStaticWebsite, DeleteContext: resourceDeleteBucketStaticWebsite, - //UpdateContext: 
nil, Schema: map[string]*schema.Schema{ "bucket_name": { Type: schema.TypeString, @@ -24,9 +23,7 @@ func ResourceBucketStaticWebsite() *schema.Resource { }, "region_name": { Type: schema.TypeString, - Required: false, - Default: "HCM-02", - Optional: true, + Required: true, ForceNew: true, Description: "The region name of the bucket", }, @@ -38,16 +35,18 @@ func ResourceBucketStaticWebsite() *schema.Resource { }, "index_document_suffix": { Type: schema.TypeString, - Optional: true, + Required: false, Default: "index.html", ForceNew: true, + Optional: true, Description: "Suffix that is appended to a request that is for a directory", }, "error_document_key": { Type: schema.TypeString, - Optional: true, + Required: false, Default: "error.html", ForceNew: true, + Optional: true, Description: "The object key name to use when a 4XX class error occurs", }, "status": { @@ -73,12 +72,17 @@ func resourceBucketStaticWebsiteCreate(ctx context.Context, d *schema.ResourceDa indexDocument := d.Get("index_document_suffix").(string) errorDocument := d.Get("error_document_key").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } putBucketWebsite := service.PutBucketWebsite(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketWebsiteRequest{ Bucket: bucketName, Suffix: indexDocument, Key: errorDocument, }) - fmt.Println("\n Put bucket website response: \n", putBucketWebsite) + fmt.Println("--------------------------------------- \n:") + fmt.Println("--------------------------------------- \n: ", putBucketWebsite) + fmt.Println("--------------------------------------- \n: ") if !putBucketWebsite.Status { diag.Errorf("failed to create bucket website for bucket %s", bucketName) @@ -87,7 +91,7 @@ func resourceBucketStaticWebsiteCreate(ctx context.Context, d *schema.ResourceDa } d.Set("status", true) d.SetId(bucketName) - 
return nil + return dataSourceBucketStaticWebsite(ctx, d, m) } func resourceDeleteBucketStaticWebsite(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -98,6 +102,9 @@ func resourceDeleteBucketStaticWebsite(ctx context.Context, d *schema.ResourceDa vpcId := d.Get("vpc_id").(string) regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } resp := service.DeleteBucketStaticWebsite(vpcId, s3ServiceDetail.S3ServiceId, bucketName) if !resp.Status { diff --git a/fptcloud/object-storage/resource_bucket_versioning.go b/fptcloud/object-storage/resource_bucket_versioning.go index 4f133fa..d388099 100644 --- a/fptcloud/object-storage/resource_bucket_versioning.go +++ b/fptcloud/object-storage/resource_bucket_versioning.go @@ -13,7 +13,7 @@ func ResourceBucketVersioning() *schema.Resource { return &schema.Resource{ CreateContext: resourceBucketVersioningCreate, ReadContext: dataSourceBucketVersioningRead, - DeleteContext: resourceBucketVersioningCreate, + DeleteContext: resourceBucketVersioningDelete, Schema: map[string]*schema.Schema{ "bucket_name": { Type: schema.TypeString, @@ -21,17 +21,15 @@ func ResourceBucketVersioning() *schema.Resource { ForceNew: true, Description: "Name of the bucket", }, - "enabled": { - Type: schema.TypeBool, + "versioning_status": { + Type: schema.TypeString, Required: true, - Description: "Enable or suspend versioning", + Description: "Status of the versioning, must be Enabled or Suspended", ForceNew: true, // Marking this field as ForceNew to ensure that the resource is recreated when the value is changed }, "region_name": { Type: schema.TypeString, - Required: false, - Default: "HCM-02", - Optional: true, + Required: true, ForceNew: true, Description: "The region name of the bucket", }, @@ -50,25 +48,31 @@ func 
resourceBucketVersioningCreate(ctx context.Context, d *schema.ResourceData, service := NewObjectStorageService(client) bucketName := d.Get("bucket_name").(string) - enabled := d.Get("enabled").(bool) - status := "Suspended" - if enabled { - status = "Enabled" + versioningStatus := d.Get("versioning_status").(string) + if versioningStatus != "Enabled" && versioningStatus != "Suspended" { + return diag.FromErr(fmt.Errorf("versioning status must be Enabled or Suspended")) } vpcId := d.Get("vpc_id").(string) regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) - + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } err := service.PutBucketVersioning(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketVersioningRequest{ - Status: status, + Status: versioningStatus, }) if err != nil { return diag.FromErr(err) } + d.SetId(fmt.Sprintf("%s:%s", bucketName, versioningStatus)) + d.Set("versioning_status", versioningStatus) + return nil +} - d.SetId(bucketName) - fmt.Println("Bucket versioning is updated for bucket", bucketName) +func resourceBucketVersioningDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + d.SetId("") + diag.FromErr(fmt.Errorf("deleting bucket versioning is not supported")) return nil } diff --git a/fptcloud/object-storage/resource_sub_user.go b/fptcloud/object-storage/resource_sub_user.go index 0a7b0fd..d4e5cfc 100644 --- a/fptcloud/object-storage/resource_sub_user.go +++ b/fptcloud/object-storage/resource_sub_user.go @@ -2,6 +2,7 @@ package fptcloud_object_storage import ( "context" + "fmt" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -11,7 +12,7 @@ import ( func ResourceSubUser() *schema.Resource { return &schema.Resource{ CreateContext: resourceSubUserCreate, - ReadContext: dataSourceSubUserRead, + ReadContext: 
dataSourceSubUserDetailRead, DeleteContext: resourceSubUserDelete, Schema: map[string]*schema.Schema{ "role": { @@ -31,9 +32,7 @@ func ResourceSubUser() *schema.Resource { }, "region_name": { Type: schema.TypeString, - Required: false, - Default: "HCM-02", - Optional: true, + Required: true, ForceNew: true, }, }, @@ -43,22 +42,24 @@ func ResourceSubUser() *schema.Resource { func resourceSubUserCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { client := m.(*common.Client) objectStorageService := NewObjectStorageService(client) - + subUserId := d.Get("user_id").(string) vpcId := d.Get("vpc_id").(string) req := SubUser{ Role: d.Get("role").(string), - UserId: d.Get("user_id").(string), + UserId: subUserId, } s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } - subUser, err := objectStorageService.CreateSubUser(req, vpcId, s3ServiceDetail.S3ServiceId) - if err != nil { - return diag.FromErr(err) + subUser := objectStorageService.CreateSubUser(req, vpcId, s3ServiceDetail.S3ServiceId) + if !subUser.Status { + return diag.FromErr(fmt.Errorf(subUser.Message)) } - d.SetId(subUser.UserId) - d.Set("role", subUser.Role) - return nil + d.SetId(subUserId) + return dataSourceSubUserDetailRead(ctx, d, m) } func resourceSubUserDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -66,6 +67,9 @@ func resourceSubUserDelete(ctx context.Context, d *schema.ResourceData, m interf objectStorageService := NewObjectStorageService(client) vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } err := 
objectStorageService.DeleteSubUser(d.Id(), vpcId, s3ServiceDetail.S3ServiceId) if err != nil { return diag.FromErr(err) @@ -73,40 +77,3 @@ func resourceSubUserDelete(ctx context.Context, d *schema.ResourceData, m interf return nil } - -func resourceSubUserAccessKeyCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - client := m.(*common.Client) - objectStorageService := NewObjectStorageService(client) - - vpcId := d.Get("vpc_id").(string) - s3ServiceId := d.Get("s3_service_id").(string) - subUserId := d.Get("sub_user_id").(string) - - accessKey := objectStorageService.CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId) - if accessKey == nil { - return diag.Errorf("failed to create sub-user access key") - } - - d.SetId(accessKey.Credential.AccessKey) - d.Set("access_key", accessKey.Credential.AccessKey) - d.Set("secret_key", accessKey.Credential.SecretKey) - - return nil -} - -func resourceSubUserAccessKeyDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - client := m.(*common.Client) - objectStorageService := NewObjectStorageService(client) - - vpcId := d.Get("vpc_id").(string) - s3ServiceId := d.Get("s3_service_id").(string) - subUserId := d.Get("sub_user_id").(string) - accessKeyId := d.Id() - - resp := objectStorageService.DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId, accessKeyId) - if !resp.Status { - return diag.Errorf("failed to delete sub-user access key") - } - - return nil -} diff --git a/fptcloud/object-storage/resource_sub_user_keys.go b/fptcloud/object-storage/resource_sub_user_keys.go new file mode 100644 index 0000000..7e157e7 --- /dev/null +++ b/fptcloud/object-storage/resource_sub_user_keys.go @@ -0,0 +1,70 @@ +package fptcloud_object_storage + +import ( + "context" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ResourceSubUserKeys() 
*schema.Resource { + return &schema.Resource{ + CreateContext: resourceSubUserAccessKeyCreate, + ReadContext: dataSourceSubUserRead, + DeleteContext: resourceSubUserAccessKeyDelete, + Schema: map[string]*schema.Schema{ + "user_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "region_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} +func resourceSubUserAccessKeyCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + objectStorageService := NewObjectStorageService(client) + + vpcId := d.Get("vpc_id").(string) + s3ServiceId := d.Get("s3_service_id").(string) + subUserId := d.Get("sub_user_id").(string) + + accessKey := objectStorageService.CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId) + if accessKey == nil { + return diag.Errorf("failed to create sub-user access key") + } + + d.SetId(accessKey.Credential.AccessKey) + d.Set("access_key", accessKey.Credential.AccessKey) + d.Set("secret_key", accessKey.Credential.SecretKey) + + return dataSourceSubUserRead(ctx, d, m) +} + +func resourceSubUserAccessKeyDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + objectStorageService := NewObjectStorageService(client) + + vpcId := d.Get("vpc_id").(string) + s3ServiceId := d.Get("s3_service_id").(string) + subUserId := d.Get("sub_user_id").(string) + accessKeyId := d.Id() + + resp := objectStorageService.DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId, accessKeyId) + if !resp.Status { + return diag.Errorf("failed to delete sub-user access key") + } + + return nil +} diff --git a/fptcloud/provider.go b/fptcloud/provider.go index 34a970b..6703cc0 100644 --- a/fptcloud/provider.go +++ b/fptcloud/provider.go @@ -86,22 +86,24 @@ func Provider() *schema.Provider { 
"fptcloud_object_storage_sub_user_detail": fptcloud_object_storage.DataSourceSubUserDetail(), }, ResourcesMap: map[string]*schema.Resource{ - "fptcloud_storage": fptcloud_storage.ResourceStorage(), - "fptcloud_ssh_key": fptcloud_ssh.ResourceSSHKey(), - "fptcloud_security_group": fptcloud_security_group.ResourceSecurityGroup(), - "fptcloud_security_group_rule": fptcloud_security_group_rule.ResourceSecurityGroupRule(), - "fptcloud_instance": fptcloud_instance.ResourceInstance(), - "fptcloud_instance_group": fptcloud_instance_group.ResourceInstanceGroup(), - "fptcloud_floating_ip": fptcloud_floating_ip.ResourceFloatingIp(), - "fptcloud_floating_ip_association": fptcloud_floating_ip_association.ResourceFloatingIpAssociation(), - "fptcloud_subnet": fptcloud_subnet.ResourceSubnet(), - "fptcloud_object_storage_bucket": fptcloud_object_storage.ResourceBucket(), - "fptcloud_object_storage_sub_user": fptcloud_object_storage.ResourceSubUser(), - "fptcloud_object_storage_access_key": fptcloud_object_storage.ResourceAccessKey(), - "fptcloud_object_storage_bucket_cors": fptcloud_object_storage.ResourceBucketCors(), - "fptcloud_object_storage_bucket_policy": fptcloud_object_storage.ResourceBucketPolicy(), - "fptcloud_object_storage_bucket_versioning": fptcloud_object_storage.ResourceBucketVersioning(), - "fptcloud_object_storage_static_website": fptcloud_object_storage.ResourceBucketStaticWebsite(), + "fptcloud_storage": fptcloud_storage.ResourceStorage(), + "fptcloud_ssh_key": fptcloud_ssh.ResourceSSHKey(), + "fptcloud_security_group": fptcloud_security_group.ResourceSecurityGroup(), + "fptcloud_security_group_rule": fptcloud_security_group_rule.ResourceSecurityGroupRule(), + "fptcloud_instance": fptcloud_instance.ResourceInstance(), + "fptcloud_instance_group": fptcloud_instance_group.ResourceInstanceGroup(), + "fptcloud_floating_ip": fptcloud_floating_ip.ResourceFloatingIp(), + "fptcloud_floating_ip_association": 
fptcloud_floating_ip_association.ResourceFloatingIpAssociation(), + "fptcloud_subnet": fptcloud_subnet.ResourceSubnet(), + "fptcloud_object_storage_bucket": fptcloud_object_storage.ResourceBucket(), + "fptcloud_object_storage_sub_user": fptcloud_object_storage.ResourceSubUser(), + "fptcloud_object_storage_access_key": fptcloud_object_storage.ResourceAccessKey(), + "fptcloud_object_storage_bucket_cors": fptcloud_object_storage.ResourceBucketCors(), + "fptcloud_object_storage_bucket_policy": fptcloud_object_storage.ResourceBucketPolicy(), + "fptcloud_object_storage_bucket_versioning": fptcloud_object_storage.ResourceBucketVersioning(), + "fptcloud_object_storage_bucket_static_website": fptcloud_object_storage.ResourceBucketStaticWebsite(), + "fptcloud_object_storage_bucket_acl": fptcloud_object_storage.ResourceBucketAcl(), + "fptcloud_object_storage_sub_user_key": fptcloud_object_storage.ResourceSubUserKeys(), }, ConfigureContextFunc: providerConfigureContext, } From 92b2df3c3cb273c75dbcd5fa654209344e85a4a4 Mon Sep 17 00:00:00 2001 From: hoanglm Date: Wed, 13 Nov 2024 10:07:33 +0700 Subject: [PATCH 3/8] Update: region name validate, fix bucket config [Object Storage] update: change payload list sub user [Object Storage] update: fix lifecycle, acl, access keys data source [Object Storage] update: fix config bucket lifecycle cannot create [Object Storage] fix: fix payload bucket lifecycle [WIP] [Object Storage] fix: bucket cors cannot delete Update: cors example --- commons/api_path.go | 26 +- commons/client.go | 8 +- .../datasource_list_access_keys.tf | 8 + .../datasource_list_bucket_acl.tf | 9 + .../datasource_list_bucket_cors.tf | 11 + .../datasource_list_bucket_lifecycle.tf | 11 + .../datasource_list_bucket_policy.tf | 9 + .../datasource_list_bucket_static_website.tf | 9 + .../datasource_list_bucket_versioning.tf | 9 + .../datasource_list_buckets.tf | 10 + .../datasource_list_enable_services.tf | 18 ++ .../datasource_list_sub_user.tf | 10 + 
.../resource_bucket_acl.tf | 6 + .../resource_bucket_cors..tf | 23 ++ .../resource_bucket_lifecycle.tf | 12 + .../resource_bucket_policy.tf | 26 ++ .../resource_bucket_static_website.tf | 7 + .../resource_bucket_versioning.tf | 7 + .../resource_sub_user.tf | 6 + .../resource_sub_user_access_key.tf | 5 + .../resource_user_key.tf | 4 + .../dataqsource_object_storage_bucket_acl.go | 159 ++++++++++++ .../datasource_object_storage.go | 190 ++++++++------ .../datasource_object_storage_access_key.go | 77 ++---- .../datasource_object_storage_bucket.go | 93 ++++--- .../datasource_object_storage_bucket_cors.go | 61 ++++- ...asource_object_storage_bucket_lifecycle.go | 197 +++++++++++++++ ...datasource_object_storage_bucket_policy.go | 14 +- ...ce_object_storage_bucket_static_website.go | 71 ++++++ ...ource_object_storage_bucket_versioning.go} | 17 +- .../datasource_object_storage_lifecycle.go | 76 ------ .../datasource_object_storage_region.go | 73 ++++++ ...atasource_object_storage_static_website.go | 144 ----------- .../datasource_object_storage_sub_user.go | 82 ++++--- ...tasource_object_storage_sub_user_detail.go | 6 +- .../object-storage/resource_access_key.go | 13 +- fptcloud/object-storage/resource_bucket.go | 2 +- .../object-storage/resource_bucket_acl.go | 6 +- .../object-storage/resource_bucket_cors.go | 232 +++++++++++------- .../resource_bucket_lifecycle.go | 215 ++++++++++++++++ .../object-storage/resource_bucket_policy.go | 82 +++++-- .../resource_bucket_static_website.go | 8 +- .../resource_bucket_versioning.go | 2 +- fptcloud/object-storage/resource_sub_user.go | 36 ++- .../object-storage/resource_sub_user_keys.go | 94 ++++--- fptcloud/provider.go | 7 +- 46 files changed, 1570 insertions(+), 621 deletions(-) create mode 100644 examples/data-sources/fptcloud_object_storage/datasource_list_access_keys.tf create mode 100644 examples/data-sources/fptcloud_object_storage/datasource_list_bucket_acl.tf create mode 100644 
examples/data-sources/fptcloud_object_storage/datasource_list_bucket_cors.tf create mode 100644 examples/data-sources/fptcloud_object_storage/datasource_list_bucket_lifecycle.tf create mode 100644 examples/data-sources/fptcloud_object_storage/datasource_list_bucket_policy.tf create mode 100644 examples/data-sources/fptcloud_object_storage/datasource_list_bucket_static_website.tf create mode 100644 examples/data-sources/fptcloud_object_storage/datasource_list_bucket_versioning.tf create mode 100644 examples/data-sources/fptcloud_object_storage/datasource_list_buckets.tf create mode 100644 examples/data-sources/fptcloud_object_storage/datasource_list_enable_services.tf create mode 100644 examples/data-sources/fptcloud_object_storage/datasource_list_sub_user.tf create mode 100644 examples/resources/fptcloud_object_storage/resource_bucket_acl.tf create mode 100644 examples/resources/fptcloud_object_storage/resource_bucket_cors..tf create mode 100644 examples/resources/fptcloud_object_storage/resource_bucket_lifecycle.tf create mode 100644 examples/resources/fptcloud_object_storage/resource_bucket_policy.tf create mode 100644 examples/resources/fptcloud_object_storage/resource_bucket_static_website.tf create mode 100644 examples/resources/fptcloud_object_storage/resource_bucket_versioning.tf create mode 100644 examples/resources/fptcloud_object_storage/resource_sub_user.tf create mode 100644 examples/resources/fptcloud_object_storage/resource_sub_user_access_key.tf create mode 100644 examples/resources/fptcloud_object_storage/resource_user_key.tf create mode 100644 fptcloud/object-storage/dataqsource_object_storage_bucket_acl.go create mode 100644 fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go create mode 100644 fptcloud/object-storage/datasource_object_storage_bucket_static_website.go rename fptcloud/object-storage/{datasource_object_storage_versioning.go => datasource_object_storage_bucket_versioning.go} (76%) delete mode 100644 
fptcloud/object-storage/datasource_object_storage_lifecycle.go create mode 100644 fptcloud/object-storage/datasource_object_storage_region.go delete mode 100644 fptcloud/object-storage/datasource_object_storage_static_website.go create mode 100644 fptcloud/object-storage/resource_bucket_lifecycle.go diff --git a/commons/api_path.go b/commons/api_path.go index a5b74f3..8938a27 100644 --- a/commons/api_path.go +++ b/commons/api_path.go @@ -80,11 +80,11 @@ var ApiPath = struct { GetBucketVersioning func(vpcId, s3ServiceId, bucketName string) string PutBucketVersioning func(vpcId, s3ServiceId, bucketName string) string // Bucket Lifecycle - GetBucketLifecycle func(vpcId, s3ServiceId, bucketName, page, pageSize string) string + GetBucketLifecycle func(vpcId, s3ServiceId, bucketName string, page, pageSize int) string PutBucketLifecycle func(vpcId, s3ServiceId, bucketName string) string DeleteBucketLifecycle func(vpcId, s3ServiceId, bucketName string) string // Bucket CORS - GetBucketCORS func(vpcId, s3ServiceId, bucketName string) string + GetBucketCORS func(vpcId, s3ServiceId, bucketName string, page, pageSize int) string PutBucketCORS func(vpcId, s3ServiceId, bucketName string) string CreateBucketCors func(vpcId, s3ServiceId, bucketName string) string // Bucket ACL @@ -92,13 +92,13 @@ var ApiPath = struct { PutBucketAcl func(vpcId, s3ServiceId, bucketName string) string // Sub-user - ListSubUsers func(vpcId, s3ServiceId string) string + ListSubUsers func(vpcId, s3ServiceId string, page, pageSize int) string CreateSubUser func(vpcId, s3ServiceId string) string UpdateSubUser func(vpcId, s3ServiceId, subUserId string) string DeleteSubUser func(vpcId, s3ServiceId, subUserId string) string DetailSubUser func(vpcId, s3ServiceId, subUserId string) string CreateSubUserAccessKey func(vpcId, s3ServiceId, subUserId string) string - DeleteSubUserAccessKey func(vpcId, s3ServiceId, subUserId, accessKeyId string) string + DeleteSubUserAccessKey func(vpcId, s3ServiceId, subUserId 
string) string // Access Key ListAccessKeys func(vpcId, s3ServiceId string) string CreateAccessKey func(vpcId, s3ServiceId string) string @@ -308,18 +308,18 @@ var ApiPath = struct { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/delete-config", vpcId, s3ServiceId, bucketName) }, // Bucket Lifecycle - GetBucketLifecycle: func(vpcId, s3ServiceId, bucketName, page, pageSize string) string { - return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/lifecycles?page=%s&page_size=%s", vpcId, s3ServiceId, bucketName, page, pageSize) + GetBucketLifecycle: func(vpcId, s3ServiceId, bucketName string, page, pageSize int) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/lifecycles?page=%d&page_size=%d", vpcId, s3ServiceId, bucketName, page, pageSize) }, PutBucketLifecycle: func(vpcId, s3ServiceId, bucketName string) string { - return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/create-bucket-lifecycle-configuration`", vpcId, s3ServiceId, bucketName) + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/create-bucket-lifecycle-configuration", vpcId, s3ServiceId, bucketName) }, DeleteBucketLifecycle: func(vpcId, s3ServiceId, bucketName string) string { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/delete-bucket-lifecycle-configuration", vpcId, s3ServiceId, bucketName) }, // Bucket CORS - GetBucketCORS: func(vpcId, s3ServiceId, bucketName string) string { - return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/cors", vpcId, s3ServiceId, bucketName) + GetBucketCORS: func(vpcId, s3ServiceId, bucketName string, page, pageSize int) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/cors?page=%d&page_size=%d", vpcId, s3ServiceId, bucketName, page, pageSize) }, PutBucketCORS: func(vpcId, s3ServiceId, bucketName string) string { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/put-bucket-cors", vpcId, s3ServiceId, bucketName) @@ -335,8 +335,8 @@ var ApiPath = struct { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/bucket/%s/acl", vpcId, 
s3ServiceId, bucketName) }, // Sub-user - ListSubUsers: func(vpcId, serviceId string) string { - return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/list", vpcId, serviceId) + ListSubUsers: func(vpcId, serviceId string, page, pageSize int) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/list?page=%d&page_size=%d", vpcId, serviceId, page, pageSize) }, CreateSubUser: func(vpcId, s3ServiceId string) string { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/create", vpcId, s3ServiceId) @@ -354,8 +354,8 @@ var ApiPath = struct { CreateSubUserAccessKey: func(vpcId, s3ServiceId, subUserId string) string { return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/%s/credentials/create", vpcId, s3ServiceId, subUserId) }, - DeleteSubUserAccessKey: func(vpcId, s3ServiceId, subUserId, accessKeyId string) string { - return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/%s/credentials/%s/delete", vpcId, s3ServiceId, subUserId, accessKeyId) + DeleteSubUserAccessKey: func(vpcId, s3ServiceId, subUserId string) string { + return fmt.Sprintf("/v1/vmware/vpc/%s/s3/%s/sub-users/%s/credentials/delete", vpcId, s3ServiceId, subUserId) }, // Access Key diff --git a/commons/client.go b/commons/client.go index 415e88a..aaf6ed4 100644 --- a/commons/client.go +++ b/commons/client.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "log" "net/http" "net/http/httptest" "net/url" @@ -112,6 +113,7 @@ func (c *Client) SendRequest(req *http.Request) ([]byte, error) { body, err := io.ReadAll(resp.Body) c.LastJSONResponse = string(body) + log.Printf("[DEBUG] Response: %s - URL: %s", c.LastJSONResponse, resp.Request.URL.String()) if resp.StatusCode >= 300 { return nil, HTTPError{Code: resp.StatusCode, Status: resp.Status, Reason: string(body)} @@ -166,7 +168,6 @@ func (c *Client) SendDeleteRequest(requestURL string) ([]byte, error) { if err != nil { return nil, err } - return c.SendRequest(req) } @@ -175,10 +176,7 @@ func (c *Client) SendDeleteRequestWithBody(requestURL string, 
params interface{} u := c.PrepareClientURL(requestURL) // we create a new buffer and encode everything to json to send it in the request - jsonValue, err := json.Marshal(params) - if err != nil { - return nil, err - } + jsonValue, _ := json.Marshal(params) req, err := http.NewRequest("DELETE", u.String(), bytes.NewBuffer(jsonValue)) if err != nil { diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_access_keys.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_access_keys.tf new file mode 100644 index 0000000..0854940 --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_access_keys.tf @@ -0,0 +1,8 @@ +data "fptcloud_object_storage_access_key" "keys" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" +} +// for raw data and all access keys from region_name will be listed +output "access_key" { + value = data.fptcloud_object_storage_access_key.keys +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_acl.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_acl.tf new file mode 100644 index 0000000..673ff5b --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_acl.tf @@ -0,0 +1,9 @@ +data "fptcloud_object_storage_bucket_acl" "example_bucket_acl" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" + bucket_name = "your_bucket_name" +} + +output "bucket_acl" { + value = data.fptcloud_object_storage_bucket_acl.example_bucket_acl +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_cors.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_cors.tf new file mode 100644 index 0000000..b69910c --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_cors.tf @@ -0,0 +1,11 @@ +data "fptcloud_object_storage_bucket_cors" "example_bucket_cors" { + vpc_id = "1b413c55-b752-4183-abad-06c4b5aca6ad" + region_name = "HCM-02" + 
bucket_name = "hoanglm3-test-terraform-static-website" + page = 1 + page_size = 100 +} + +output "bucket_cors" { + value = data.fptcloud_object_storage_bucket_cors.example_bucket_cors +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_lifecycle.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_lifecycle.tf new file mode 100644 index 0000000..177ef39 --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_lifecycle.tf @@ -0,0 +1,11 @@ +data "fptcloud_object_storage_bucket_lifecycle" "example_bucket_lifecycle" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" + bucket_name = "your_bucket_name" + page = 1 + page_size = 100 +} + +output "bucket_lifecycle" { + value = data.fptcloud_object_storage_bucket_lifecycle.example_bucket_lifecycle +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_policy.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_policy.tf new file mode 100644 index 0000000..7d2f678 --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_policy.tf @@ -0,0 +1,9 @@ +data "fptcloud_object_storage_bucket_policy" "example_bucket_policy" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" + bucket_name = "your_bucket_name" +} + +output "bucket_policy" { + value = data.fptcloud_object_storage_bucket_policy.example_bucket_policy.policy +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_static_website.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_static_website.tf new file mode 100644 index 0000000..21484aa --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_static_website.tf @@ -0,0 +1,9 @@ +data "fptcloud_object_storage_bucket_static_website" "example_bucket_static_website" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" + bucket_name = 
"your_bucket_name" +} + +output "bucket_static_website" { + value = data.fptcloud_object_storage_bucket_static_website.example_bucket_static_website +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_versioning.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_versioning.tf new file mode 100644 index 0000000..f32ab2a --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_bucket_versioning.tf @@ -0,0 +1,9 @@ +data "fptcloud_object_storage_bucket_versioning" "example_bucket_versioning" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" + bucket_name = "your_bucket_name" +} + +output "bucket_versioning" { + value = data.fptcloud_object_storage_bucket_versioning.example_bucket_versioning +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_buckets.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_buckets.tf new file mode 100644 index 0000000..6eb5940 --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_buckets.tf @@ -0,0 +1,10 @@ +data "fptcloud_object_storage_bucket" "hoanglm32" { + vpc_id = "your_vpc_id" + page = 1 + page_size = 100000 + region_name = "your_region_name" +} +// for raw data and all buckets will be listed +output "name" { + value = data.fptcloud_object_storage_bucket.hoanglm32.list_bucket_result +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_enable_services.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_enable_services.tf new file mode 100644 index 0000000..e36a31d --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_enable_services.tf @@ -0,0 +1,18 @@ +data "fptcloud_s3_service_enable" "hoanglm32" { + vpc_id = "your_vpc_id" +} +// All regions formatted +output "all_regions_formatted" { + value = { + for region in data.fptcloud_s3_service_enable.hoanglm32.s3_enable_services : + region.s3_service_name => { + id 
= region.s3_service_id + platform = region.s3_platform + region_name = region.s3_service_name + } + } +} +// Region name only, * for all if you want specific index, use [0], [1], ... +output "region_name" { + value = data.fptcloud_s3_service_enable.hoanglm32.s3_enable_services[*].s3_service_name +} diff --git a/examples/data-sources/fptcloud_object_storage/datasource_list_sub_user.tf b/examples/data-sources/fptcloud_object_storage/datasource_list_sub_user.tf new file mode 100644 index 0000000..9f3c125 --- /dev/null +++ b/examples/data-sources/fptcloud_object_storage/datasource_list_sub_user.tf @@ -0,0 +1,10 @@ +data "fptcloud_object_storage_sub_user" "hoanglm32" { + vpc_id = "your_vpc_id" + page = 1 + page_size = 100000 + region_name = "your_region_name" +} +// for raw data and all sub users will be listed +output "list_sub_user" { + value = data.fptcloud_object_storage_sub_user.hoanglm32.list_sub_user +} diff --git a/examples/resources/fptcloud_object_storage/resource_bucket_acl.tf b/examples/resources/fptcloud_object_storage/resource_bucket_acl.tf new file mode 100644 index 0000000..6f8f5bf --- /dev/null +++ b/examples/resources/fptcloud_object_storage/resource_bucket_acl.tf @@ -0,0 +1,6 @@ +resource "fptcloud_object_storage_bucket_acl" "bucket_acl" { + vpc_id = "your_vpc_id" + region_name = "your_bucket_region" + bucket_name = "your_bucket_name" + canned_acl = "private" +} diff --git a/examples/resources/fptcloud_object_storage/resource_bucket_cors..tf b/examples/resources/fptcloud_object_storage/resource_bucket_cors..tf new file mode 100644 index 0000000..270d7ca --- /dev/null +++ b/examples/resources/fptcloud_object_storage/resource_bucket_cors..tf @@ -0,0 +1,23 @@ +resource "fptcloud_object_storage_bucket_cors" "example_bucket_cors" { + vpc_id = "1b413c55-b752-4183-abad-06c4b5aca6ad" + region_name = "HCM-02" + bucket_name = "a-hoanglm32-test" + + # Option 1: Load cors config from file + cors_config_file = file("${path.module}/your_bucket_cors_config.json") + 
+ # Option 2: Inline cors_config + # cors_config = jsonencode({ + # { + # "ID": "a9099", + # "AllowedOrigins": ["http://www.example.com", "http://www.example2.com"], + # "AllowedMethods": ["GET", "PUT", "DELETE"], + # "MaxAgeSeconds": 3000, + # "ExposeHeaders": ["Etag", "x-amz"], + # "AllowedHeaders": ["*", "demo"] + # } + # }) +} +output "bucket_cors" { + value = fptcloud_object_storage_bucket_cors.example_bucket_cors.status +} diff --git a/examples/resources/fptcloud_object_storage/resource_bucket_lifecycle.tf b/examples/resources/fptcloud_object_storage/resource_bucket_lifecycle.tf new file mode 100644 index 0000000..22b80cb --- /dev/null +++ b/examples/resources/fptcloud_object_storage/resource_bucket_lifecycle.tf @@ -0,0 +1,12 @@ +resource "fptcloud_object_storage_bucket_lifecycle" "example_bucket_lifecycle" { + bucket_name = "your_bucket_name" + region_name = "your_region_name" + vpc_id = "your_vpc_id" + + # Option 1: Load policy from file + life_cycle_rule_file = file("${path.module}/your_bucket_lifecycle.json") +} + +output "bucket_lifecycle" { + value = fptcloud_object_storage_bucket_lifecycle.example_bucket_lifecycle +} diff --git a/examples/resources/fptcloud_object_storage/resource_bucket_policy.tf b/examples/resources/fptcloud_object_storage/resource_bucket_policy.tf new file mode 100644 index 0000000..667aec5 --- /dev/null +++ b/examples/resources/fptcloud_object_storage/resource_bucket_policy.tf @@ -0,0 +1,26 @@ +resource "fptcloud_object_storage_bucket_policy" "example_bucket_policy" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" + bucket_name = "your_bucket_name" + + // Option 1: Load policy from file + policy_file = file("${path.module}/your_bucket_policy_json_content.json") + + // Option 2: Inline policy + // policy = jsonencode({ + // Version = "2012-10-17" + // Statement = [ + // { + // Sid = "PublicReadGetObject" + // Effect = "Allow" + // Principal = "*" + // Action = "s3:GetObject" + // Resource = 
"arn:aws:s3:::example-bucket/*" + // } + // ] + // }) +} +// NOTE: In case wanna delete bucket policy, just ignore policy_file and policy fields +output "bucket_policy" { + value = fptcloud_object_storage_bucket_policy.example_bucket_policy.status +} diff --git a/examples/resources/fptcloud_object_storage/resource_bucket_static_website.tf b/examples/resources/fptcloud_object_storage/resource_bucket_static_website.tf new file mode 100644 index 0000000..e453d72 --- /dev/null +++ b/examples/resources/fptcloud_object_storage/resource_bucket_static_website.tf @@ -0,0 +1,7 @@ +resource "fptcloud_object_storage_bucket_static_website" "example_static_website" { + vpc_id = "your_vpc_id" + region_name = "your_region" + bucket_name = "your_bucket_name" + index_document_suffix = "your_index_document_suffix" + error_document_key = "your_error_document_suffix" +} diff --git a/examples/resources/fptcloud_object_storage/resource_bucket_versioning.tf b/examples/resources/fptcloud_object_storage/resource_bucket_versioning.tf new file mode 100644 index 0000000..d55d0ab --- /dev/null +++ b/examples/resources/fptcloud_object_storage/resource_bucket_versioning.tf @@ -0,0 +1,7 @@ +resource "fptcloud_object_storage_bucket_versioning" "versioning" { + vpc_id = "your_vpc_id" + region_name = "your_bucket_region" + bucket_name = "your_bucket_name" + versioning_status = "Suspended" // or "Enabled" +} + diff --git a/examples/resources/fptcloud_object_storage/resource_sub_user.tf b/examples/resources/fptcloud_object_storage/resource_sub_user.tf new file mode 100644 index 0000000..15f3f26 --- /dev/null +++ b/examples/resources/fptcloud_object_storage/resource_sub_user.tf @@ -0,0 +1,6 @@ +resource "fptcloud_object_storage_sub_user" "example" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" + user_id = "your_user_id" + role = "your_role" +} diff --git a/examples/resources/fptcloud_object_storage/resource_sub_user_access_key.tf 
b/examples/resources/fptcloud_object_storage/resource_sub_user_access_key.tf new file mode 100644 index 0000000..39f2c6d --- /dev/null +++ b/examples/resources/fptcloud_object_storage/resource_sub_user_access_key.tf @@ -0,0 +1,5 @@ +resource "fptcloud_object_storage_user_key" "test_key" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" + user_id = "your_user_id" +} diff --git a/examples/resources/fptcloud_object_storage/resource_user_key.tf b/examples/resources/fptcloud_object_storage/resource_user_key.tf new file mode 100644 index 0000000..d74d4e7 --- /dev/null +++ b/examples/resources/fptcloud_object_storage/resource_user_key.tf @@ -0,0 +1,4 @@ +resource "fptcloud_object_storage_access_key" "test_key" { + vpc_id = "your_vpc_id" + region_name = "your_region_name" +} diff --git a/fptcloud/object-storage/dataqsource_object_storage_bucket_acl.go b/fptcloud/object-storage/dataqsource_object_storage_bucket_acl.go new file mode 100644 index 0000000..49ea7b2 --- /dev/null +++ b/fptcloud/object-storage/dataqsource_object_storage_bucket_acl.go @@ -0,0 +1,159 @@ +package fptcloud_object_storage + +import ( + "context" + "fmt" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceBucketAcl() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceBucketAclRead, + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC ID", + }, + "bucket_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the bucket to config the ACL", + }, + "region_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, + "canned_acl": { + Type: schema.TypeString, + Computed: true, + Description: "The Access Control List (ACL) status of the bucket which can be one of the following values: private, public-read, default is private", + }, + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "The status after configuring the bucket ACL", + }, + "bucket_acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "owner": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "grants": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "grantee": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Computed: true, + }, + "id": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "permission": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceBucketAclRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + bucketName := d.Get("bucket_name").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + } + r := service.GetBucketAcl(vpcId, s3ServiceDetail.S3ServiceId, bucketName) + if !r.Status { + return diag.Errorf("failed to get bucket ACL for 
bucket %s", bucketName) + } + bucketAcl := []interface{}{ + map[string]interface{}{ + "owner": []interface{}{ + map[string]interface{}{ + "display_name": r.Owner.DisplayName, + "id": r.Owner.ID, + }, + }, + "grants": func() []interface{} { + grants := make([]interface{}, len(r.Grants)) + for i, grant := range r.Grants { + grants[i] = map[string]interface{}{ + "grantee": []interface{}{ + map[string]interface{}{ + "display_name": grant.Grantee.DisplayName, + "id": grant.Grantee.ID, + "type": grant.Grantee.Type, + }, + }, + "permission": grant.Permission, + } + } + return grants + }(), + }, + } + d.SetId(bucketName) + if err := d.Set("bucket_acl", bucketAcl); err != nil { + d.SetId("") + return diag.FromErr(err) + } + if err := d.Set("canned_acl", r.CannedACL); err != nil { + d.SetId("") + return diag.FromErr(err) + } + if err := d.Set("status", r.Status); err != nil { + d.SetId("") + return diag.FromErr(err) + } + + return nil +} diff --git a/fptcloud/object-storage/datasource_object_storage.go b/fptcloud/object-storage/datasource_object_storage.go index f813054..187c249 100644 --- a/fptcloud/object-storage/datasource_object_storage.go +++ b/fptcloud/object-storage/datasource_object_storage.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" common "terraform-provider-fptcloud/commons" - "time" ) // SubUserCreateRequest represents the request body for creating a sub-user @@ -35,13 +34,14 @@ type CreateAccessKeyResponse struct { } `json:"credential,omitempty"` } type SubUserCreateKeyResponse struct { - Status bool `json:"status"` + Status bool `json:"status"` + Message string `json:"message,omitempty"` Credential struct { - AccessKey string `json:"accessKey"` - SecretKey string `json:"secretKey"` - Active interface{} `json:"active"` - CreatedDate interface{} `json:"createdDate"` - } `json:"credential"` + AccessKey string `json:"accessKey,omitempty"` + SecretKey string `json:"secretKey,omitempty"` + Active interface{} `json:"active,omitempty"` + CreatedDate interface{} 
`json:"createdDate,omitempty"` + } `json:"credential,omitempty"` } type SubUser struct { @@ -67,17 +67,24 @@ type CorsRule struct { ID string `json:"ID,omitempty"` AllowedOrigins []string `json:"AllowedOrigins"` AllowedMethods []string `json:"AllowedMethods"` - MaxAgeSeconds int `json:"MaxAgeSeconds,omitempty"` ExposeHeaders []string `json:"ExposeHeaders,omitempty"` AllowedHeaders []string `json:"AllowedHeaders,omitempty"` + MaxAgeSeconds int `json:"MaxAgeSeconds"` } type BucketCors struct { CorsRules []CorsRule `json:"CORSRules"` } type BucketCorsResponse struct { - Status bool `json:"status"` - Total int `json:"total"` - CorsRules []CorsRule `json:"cors_rules"` + Status bool `json:"status"` + CorsRules []struct { + ID string `json:"ID"` + AllowedHeaders []string `json:"AllowedHeaders,omitempty"` + AllowedMethods []string `json:"AllowedMethods"` + AllowedOrigins []string `json:"AllowedOrigins"` + ExposeHeaders []string `json:"ExposeHeaders,omitempty"` + MaxAgeSeconds int `json:"MaxAgeSeconds"` + } `json:"cors_rules"` + Total int `json:"total"` } type BucketPolicyResponse struct { @@ -98,6 +105,10 @@ type Statement struct { type BucketVersioningRequest struct { Status string `json:"status"` // "Enabled" or "Suspended" } +type BucketVersioningResponse struct { + Status bool `json:"status"` + Config string `json:"config"` // "Enabled" or "Suspended" +} type BucketAclResponse struct { Status bool `json:"status"` @@ -177,12 +188,12 @@ type BucketRequest struct { } type ListBucketResponse struct { Buckets []struct { - Name string `json:"Name"` - CreationDate time.Time `json:"CreationDate"` - IsEmpty bool `json:"isEmpty"` - S3ServiceID string `json:"s3_service_id"` - IsEnabledLogging bool `json:"isEnabledLogging"` - Endpoint string `json:"endpoint"` + Name string `json:"Name"` + CreationDate string `json:"CreationDate"` + IsEmpty bool `json:"isEmpty"` + S3ServiceID string `json:"s3_service_id"` + IsEnabledLogging bool `json:"isEnabledLogging"` + Endpoint string 
`json:"endpoint"` } `json:"buckets"` Total int `json:"total"` } @@ -190,16 +201,21 @@ type BucketLifecycleResponse struct { Status bool `json:"status"` Rules []struct { Expiration struct { - Days int `json:"Days"` + ExpiredObjectDeleteMarker bool `json:"ExpiredObjectDeleteMarker,omitempty"` + Days int `json:"Days,omitempty"` } `json:"Expiration"` ID string `json:"ID"` Filter struct { Prefix string `json:"Prefix"` - } `json:"Filter"` - Status string `json:"Status"` + } `json:"Filter,omitempty"` + Status string `json:"Status"` + NoncurrentVersionExpiration struct { + NoncurrentDays int `json:"NoncurrentDays"` + } `json:"NoncurrentVersionExpiration"` AbortIncompleteMultipartUpload struct { DaysAfterInitiation int `json:"DaysAfterInitiation"` } `json:"AbortIncompleteMultipartUpload"` + Prefix string `json:"Prefix,omitempty"` } `json:"rules"` Total int `json:"total"` } @@ -213,6 +229,31 @@ type DetailSubUser struct { AccessKeys []string `json:"access_keys"` } +type S3BucketLifecycleConfig struct { + ID string `json:"ID"` + Filter Filter `json:"Filter"` + Expiration Expiration `json:"Expiration"` + NoncurrentVersionExpiration NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration"` + AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload"` +} + +type Filter struct { + Prefix string `json:"Prefix"` +} + +type Expiration struct { + Days int `json:"Days,omitempty"` + ExpiredObjectDeleteMarker bool `json:"ExpiredObjectDeleteMarker,omitempty"` +} + +type NoncurrentVersionExpiration struct { + NoncurrentDays int `json:"NoncurrentDays"` +} + +type AbortIncompleteMultipartUpload struct { + DaysAfterInitiation int `json:"DaysAfterInitiation"` +} + // ObjectStorageService defines the interface for object storage operations type ObjectStorageService interface { CheckServiceEnable(vpcId string) S3ServiceEnableResponse @@ -230,23 +271,23 @@ type ObjectStorageService interface { // Sub user CreateSubUser(req SubUser, vpcId, 
s3ServiceId string) *CommonResponse DeleteSubUser(vpcId, s3ServiceId, subUserId string) error - ListSubUsers(vpcId, s3ServiceId string) ([]SubUserListResponse, error) + ListSubUsers(vpcId, s3ServiceId string, page, pageSize int) (SubUserListResponse, error) DetailSubUser(vpcId, s3ServiceId, subUserId string) *DetailSubUser CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId string) *SubUserCreateKeyResponse DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId, accessKeyId string) CommonResponse // bucket configuration - PutBucketPolicy(vpcId, s3ServiceId, bucketName string, policy BucketPolicyRequest) CommonResponse + PutBucketPolicy(vpcId, s3ServiceId, bucketName string, policy interface{}) CommonResponse GetBucketPolicy(vpcId, s3ServiceId, bucketName string) *BucketPolicyResponse // CORS configuration - PutBucketCors(bucketName, vpcId, s3ServiceId string, cors CorsRule) (CommonResponse, error) - UpdateBucketCors(bucketName, vpcId, s3ServiceId string, cors BucketCors) (CommonResponse, error) - GetBucketCors(vpcId, s3ServiceId, bucketName string) (*BucketCors, error) + CreateBucketCors(vpcId, s3ServiceId, bucketName string, cors map[string]interface{}) CommonResponse + UpdateBucketCors(vpcId, s3ServiceId, bucketName string, cors []map[string]interface{}) CommonResponse + GetBucketCors(vpcId, s3ServiceId, bucketName string, page, pageSize int) (*BucketCorsResponse, error) // Versioning configuration PutBucketVersioning(vpcId, s3ServiceId, bucketName string, versioning BucketVersioningRequest) error - GetBucketVersioning(vpcId, s3ServiceId, bucketName string) *BucketVersioningRequest + GetBucketVersioning(vpcId, s3ServiceId, bucketName string) *BucketVersioningResponse // Acl configuration PutBucketAcl(vpcId, s3ServiceId, bucketName string, acl BucketAclRequest) PutBucketAclResponse @@ -258,9 +299,9 @@ type ObjectStorageService interface { DeleteBucketStaticWebsite(vpcId, s3ServiceId, bucketName string) CommonResponse // Lifecycle configuration - 
GetBucketLifecycle(vpcId, s3ServiceId, bucketName, page, pageSize string) (*BucketLifecycleResponse, error) - PutBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle interface{}) (*BucketLifecycleResponse, error) - DeleteBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle interface{}) (*BucketLifecycleResponse, error) + GetBucketLifecycle(vpcId, s3ServiceId, bucketName string, page, pageSize int) BucketLifecycleResponse + PutBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle map[string]interface{}) CommonResponse + DeleteBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle map[string]interface{}) CommonResponse } // ObjectStorageServiceImpl is the implementation of ObjectStorageService @@ -353,17 +394,17 @@ func (s *ObjectStorageServiceImpl) ListBuckets(vpcId, s3ServiceId string, page, return buckets } -func (s *ObjectStorageServiceImpl) ListSubUsers(vpcId, s3ServiceId string) ([]SubUserListResponse, error) { - apiPath := common.ApiPath.ListSubUsers(vpcId, s3ServiceId) +func (s *ObjectStorageServiceImpl) ListSubUsers(vpcId, s3ServiceId string, page, pageSize int) (SubUserListResponse, error) { + apiPath := common.ApiPath.ListSubUsers(vpcId, s3ServiceId, page, pageSize) resp, err := s.client.SendGetRequest(apiPath) if err != nil { - return nil, fmt.Errorf("failed to list sub-users: %v", err) + return SubUserListResponse{Total: 0}, fmt.Errorf("failed to list sub-users: %v", err) } - var subUsers []SubUserListResponse + var subUsers SubUserListResponse err = json.Unmarshal(resp, &subUsers) if err != nil { - return nil, fmt.Errorf("failed to unmarshal sub-user list response: %v", err) + return SubUserListResponse{Total: 0}, fmt.Errorf("failed to unmarshal sub-user list response: %v", err) } return subUsers, nil @@ -376,13 +417,13 @@ func (s *ObjectStorageServiceImpl) ListAccessKeys(vpcId, s3ServiceId string) (Ac return AccessKey{}, fmt.Errorf("failed to list access keys: %v", err) } - var accessKeys AccessKey - err = 
json.Unmarshal(resp, &accessKeys) + var accessKey AccessKey + err = json.Unmarshal(resp, &accessKey) if err != nil { return AccessKey{}, fmt.Errorf("failed to unmarshal access key list response: %v", err) } - return accessKeys, nil + return accessKey, nil } func (s *ObjectStorageServiceImpl) DeleteBucket(vpcId, s3ServiceId, bucketName string) CommonResponse { @@ -406,7 +447,7 @@ func (s *ObjectStorageServiceImpl) DeleteAccessKey(vpcId, s3ServiceId, accessKey } // Implement bucket policy methods -func (s *ObjectStorageServiceImpl) PutBucketPolicy(vpcId, s3ServiceId, bucketName string, policy BucketPolicyRequest) CommonResponse { +func (s *ObjectStorageServiceImpl) PutBucketPolicy(vpcId, s3ServiceId, bucketName string, policy interface{}) CommonResponse { apiPath := common.ApiPath.PutBucketPolicy(vpcId, s3ServiceId, bucketName) if _, err := s.client.SendPutRequest(apiPath, policy); err != nil { return CommonResponse{Status: false} @@ -429,30 +470,30 @@ func (s *ObjectStorageServiceImpl) GetBucketPolicy(vpcId, s3ServiceId, bucketNam } // Implement CORS methods -func (s *ObjectStorageServiceImpl) PutBucketCors(bucketName, vpcId, s3ServiceId string, cors CorsRule) (CommonResponse, error) { - apiPath := common.ApiPath.PutBucketCORS(vpcId, s3ServiceId, bucketName) - if _, err := s.client.SendPutRequest(apiPath, cors); err != nil { - return CommonResponse{Status: false}, fmt.Errorf("failed to update bucket CORS: %v", err) +func (s *ObjectStorageServiceImpl) CreateBucketCors(vpcId, s3ServiceId, bucketName string, cors map[string]interface{}) CommonResponse { + apiPath := common.ApiPath.CreateBucketCors(vpcId, s3ServiceId, bucketName) + if _, err := s.client.SendPostRequest(apiPath, cors); err != nil { + return CommonResponse{Status: false, Message: err.Error()} } - return CommonResponse{Status: true}, nil + return CommonResponse{Status: true, Message: "Bucket CORS configuration updated successfully"} } -func (s *ObjectStorageServiceImpl) UpdateBucketCors(bucketName, vpcId, 
s3ServiceId string, cors BucketCors) (CommonResponse, error) { +func (s *ObjectStorageServiceImpl) UpdateBucketCors(vpcId, s3ServiceId, bucketName string, cors []map[string]interface{}) CommonResponse { apiPath := common.ApiPath.PutBucketCORS(vpcId, s3ServiceId, bucketName) if _, err := s.client.SendPutRequest(apiPath, cors); err != nil { - return CommonResponse{Status: false}, fmt.Errorf("failed to update bucket CORS: %v", err) + return CommonResponse{Status: false, Message: err.Error()} } - return CommonResponse{Status: true}, nil + return CommonResponse{Status: true, Message: "Bucket CORS configuration updated successfully"} } -func (s *ObjectStorageServiceImpl) GetBucketCors(vpcId, s3ServiceId, bucketName string) (*BucketCors, error) { - apiPath := common.ApiPath.GetBucketCORS(vpcId, s3ServiceId, bucketName) +func (s *ObjectStorageServiceImpl) GetBucketCors(vpcId, s3ServiceId, bucketName string, page, pageSize int) (*BucketCorsResponse, error) { + apiPath := common.ApiPath.GetBucketCORS(vpcId, s3ServiceId, bucketName, page, pageSize) resp, err := s.client.SendGetRequest(apiPath) if err != nil { return nil, fmt.Errorf("failed to get bucket CORS: %v", err) } - var cors BucketCors + var cors BucketCorsResponse if err := json.Unmarshal(resp, &cors); err != nil { return nil, fmt.Errorf("failed to unmarshal bucket CORS: %v", err) } @@ -468,16 +509,16 @@ func (s *ObjectStorageServiceImpl) PutBucketVersioning(vpcId, s3ServiceId, bucke return nil } -func (s *ObjectStorageServiceImpl) GetBucketVersioning(vpcId, s3ServiceId, bucketName string) *BucketVersioningRequest { +func (s *ObjectStorageServiceImpl) GetBucketVersioning(vpcId, s3ServiceId, bucketName string) *BucketVersioningResponse { apiPath := common.ApiPath.GetBucketVersioning(vpcId, s3ServiceId, bucketName) resp, err := s.client.SendGetRequest(apiPath) if err != nil { - return &BucketVersioningRequest{} + return &BucketVersioningResponse{Status: false} } - var versioning BucketVersioningRequest + var versioning 
BucketVersioningResponse if err := json.Unmarshal(resp, &versioning); err != nil { - return &BucketVersioningRequest{} + return &BucketVersioningResponse{Status: false} } return &versioning } @@ -546,68 +587,69 @@ func (s *ObjectStorageServiceImpl) DeleteSubUser(vpcId, s3ServiceId, subUserId s return nil } -func (s *ObjectStorageServiceImpl) GetBucketLifecycle(vpcId, s3ServiceId, bucketName, page, pageSize string) (*BucketLifecycleResponse, error) { +func (s *ObjectStorageServiceImpl) GetBucketLifecycle(vpcId, s3ServiceId, bucketName string, page, pageSize int) BucketLifecycleResponse { apiPath := common.ApiPath.GetBucketLifecycle(vpcId, s3ServiceId, bucketName, page, pageSize) resp, err := s.client.SendGetRequest(apiPath) if err != nil { - return nil, fmt.Errorf("failed to get bucket lifecycle: %v", err) + return BucketLifecycleResponse{Total: 0, Status: false} } var bucketLifecycle BucketLifecycleResponse if err := json.Unmarshal(resp, &bucketLifecycle); err != nil { - return nil, fmt.Errorf("failed to unmarshal bucket lifecycle: %v", err) + return BucketLifecycleResponse{Total: 0, Status: false} } - return &bucketLifecycle, nil + return bucketLifecycle } -func (s *ObjectStorageServiceImpl) PutBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle interface{}) (*BucketLifecycleResponse, error) { +func (s *ObjectStorageServiceImpl) PutBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle map[string]interface{}) CommonResponse { apiPath := common.ApiPath.PutBucketLifecycle(vpcId, s3ServiceId, bucketName) - resp, err := s.client.SendPutRequest(apiPath, lifecycle) + resp, err := s.client.SendPostRequest(apiPath, lifecycle) if err != nil { - return nil, fmt.Errorf("failed to put bucket lifecycle: %v", err) + return CommonResponse{Status: false, Message: err.Error()} } - var bucketLifecycle BucketLifecycleResponse + var bucketLifecycle CommonResponse if err := json.Unmarshal(resp, &bucketLifecycle); err != nil { - return nil, fmt.Errorf("failed to 
unmarshal bucket lifecycle: %v", err) + return CommonResponse{Status: false, Message: err.Error()} } - return &bucketLifecycle, nil + return bucketLifecycle } -func (s *ObjectStorageServiceImpl) DeleteBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle interface{}) (*BucketLifecycleResponse, error) { +func (s *ObjectStorageServiceImpl) DeleteBucketLifecycle(vpcId, s3ServiceId, bucketName string, lifecycle map[string]interface{}) CommonResponse { apiPath := common.ApiPath.DeleteBucketLifecycle(vpcId, s3ServiceId, bucketName) resp, err := s.client.SendPutRequest(apiPath, lifecycle) if err != nil { - return nil, fmt.Errorf("failed to delete bucket lifecycle: %v", err) + return CommonResponse{Status: false, Message: err.Error()} } - var bucketLifecycle BucketLifecycleResponse + var bucketLifecycle CommonResponse if err := json.Unmarshal(resp, &bucketLifecycle); err != nil { - return nil, fmt.Errorf("failed to unmarshal bucket lifecycle: %v", err) + return CommonResponse{Status: false, Message: err.Error()} } - return &bucketLifecycle, nil + return bucketLifecycle } func (s *ObjectStorageServiceImpl) CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId string) *SubUserCreateKeyResponse { apiPath := common.ApiPath.CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId) resp, err := s.client.SendPostRequest(apiPath, nil) if err != nil { - return nil + return &SubUserCreateKeyResponse{Status: false, Message: err.Error()} } var subUserKeys SubUserCreateKeyResponse if err := json.Unmarshal(resp, &subUserKeys); err != nil { - return nil + return &SubUserCreateKeyResponse{Status: false, Message: err.Error()} } return &subUserKeys } func (s *ObjectStorageServiceImpl) DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId, accessKeyId string) CommonResponse { - apiPath := common.ApiPath.DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId, accessKeyId) - if _, err := s.client.SendDeleteRequest(apiPath); err != nil { - return CommonResponse{Status: false} + apiPath := 
common.ApiPath.DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId) + payload := map[string]string{"accessKey": accessKeyId} + if _, err := s.client.SendDeleteRequestWithBody(apiPath, payload); err != nil { + return CommonResponse{Status: false, Message: err.Error()} } - return CommonResponse{Status: true} + return CommonResponse{Status: true, Message: "Access key deleted successfully"} } func (s *ObjectStorageServiceImpl) DetailSubUser(vpcId, s3ServiceId, subUserId string) *DetailSubUser { diff --git a/fptcloud/object-storage/datasource_object_storage_access_key.go b/fptcloud/object-storage/datasource_object_storage_access_key.go index 275f01b..9296b70 100644 --- a/fptcloud/object-storage/datasource_object_storage_access_key.go +++ b/fptcloud/object-storage/datasource_object_storage_access_key.go @@ -21,28 +21,20 @@ func DataSourceAccessKey() *schema.Resource { "region_name": { Type: schema.TypeString, Required: true, - Description: "The region name to create the access key", + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", }, - "access_keys": { + "credentials": { Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "credentials": { - Type: schema.TypeList, + "access_key": { + Type: schema.TypeString, + Computed: true, + }, + "active": { + Type: schema.TypeBool, Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "access_key": { - Type: schema.TypeString, - Computed: true, - }, - "active": { - Type: schema.TypeBool, - Computed: true, - }, - }, - }, }, }, }, @@ -58,49 +50,26 @@ func dataSourceAccessKeyRead(ctx context.Context, d *schema.ResourceData, m inte regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) } - _, err := service.ListAccessKeys(vpcId, s3ServiceDetail.S3ServiceId) + keys, err := service.ListAccessKeys(vpcId, s3ServiceDetail.S3ServiceId) if err != nil { return diag.FromErr(err) } - // if len(accessKeys.Credentials) > 0 { - // d.SetId(fmt.Sprintf("access_keys_%d", len(accessKeys))) - // if err := d.Set("access_keys", flattenAccessKeys(accessKeys)); err != nil { - // return diag.FromErr(err) - // } - // } - - return nil -} - -// func flattenAccessKeys(accessKeys AccessKey) []interface{} { -// var result []interface{} -// for _, ak := range accessKeys.Credentials { -// for _, cred := range ak.Credentials { -// credMap := map[string]interface{}{ -// "id": cred.ID, -// "credentials": flattenCredentials(cred.Credentials), -// } -// result = append(result, credMap) -// } -// } -// return result -// } - -func flattenCredentials(credentials []struct { - AccessKey string `json:"accessKey"` - Active bool `json:"active"` - CreatedDate interface{} `json:"createdDate"` -}) []interface{} { - var 
result []interface{} - for _, cred := range credentials { - credMap := map[string]interface{}{ - "access_key": cred.AccessKey, - "active": cred.Active, + var formattedData []interface{} + for _, key := range keys.Credentials { + for _, cred := range key.Credentials { + formattedData = append(formattedData, map[string]interface{}{ + "access_key": cred.AccessKey, + "active": cred.Active, + }) } - result = append(result, credMap) } - return result + if err := d.Set("credentials", formattedData); err != nil { + return diag.FromErr(fmt.Errorf("error setting data: %v", err)) + } + d.SetId(vpcId) + + return nil } diff --git a/fptcloud/object-storage/datasource_object_storage_bucket.go b/fptcloud/object-storage/datasource_object_storage_bucket.go index 420827d..f09fc39 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket.go @@ -13,30 +13,59 @@ func DataSourceBucket() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceBucketRead, Schema: map[string]*schema.Schema{ - "vpd_id": { + "vpc_id": { Type: schema.TypeString, Required: true, Description: "The VPC ID", }, - "name": { + "region_name": { Type: schema.TypeString, Required: true, - Description: "Name of the bucket", + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", }, - "region": { - Type: schema.TypeString, - Computed: true, - Description: "Region where the bucket is located", + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "Page number", }, - "versioning": { - Type: schema.TypeBool, - Computed: true, - Description: "Whether versioning is enabled", + "page_size": { + Type: schema.TypeInt, + Optional: true, + Description: "Number of items per page", }, - "acl": { - Type: schema.TypeString, - Computed: true, - Description: "Access control list", + "list_bucket_result": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "endpoint": { + Type: schema.TypeString, + Required: true, + Description: "The endpoint of the bucket", + }, + "is_enabled_logging": { + Type: schema.TypeBool, + Required: true, + }, + "bucket_name": { + Type: schema.TypeString, + Required: true, + Description: "The name of the bucket", + }, + "creation_date": { + Type: schema.TypeString, + Required: true, + Description: "The creation date of the bucket", + }, + "s3_service_id": { + Type: schema.TypeString, + Required: true, + }, + "is_empty": { + Type: schema.TypeBool, + Required: true, + Description: "The bucket is empty or not", + }, + }}, }, }, } @@ -46,30 +75,38 @@ func dataSourceBucketRead(ctx context.Context, d *schema.ResourceData, m interfa client := m.(*common.Client) service := NewObjectStorageService(client) vpcId := d.Get("vpc_id").(string) - s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) - if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) - } page := 1 - if d.Get("page") != nil { + if d.Get("page").(int) > 0 { page = d.Get("page").(int) } pageSize := 25 - if d.Get("page_size") != nil { + if d.Get("page_size").(int) > 0 { pageSize = d.Get("page_size").(int) } + regionName := 
d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + } buckets := service.ListBuckets(vpcId, s3ServiceDetail.S3ServiceId, page, pageSize) if buckets.Total == 0 { return diag.Errorf("no buckets found") } - - bucketName := d.Get("name").(string) + var formattedData []interface{} for _, bucket := range buckets.Buckets { - if bucket.Name == bucketName { - d.SetId(bucket.Name) - return nil - } + formattedData = append(formattedData, map[string]interface{}{ + "endpoint": bucket.Endpoint, + "is_enabled_logging": bucket.IsEnabledLogging, + "bucket_name": bucket.Name, + "creation_date": bucket.CreationDate, + "s3_service_id": bucket.S3ServiceID, + "is_empty": bucket.IsEmpty, + }) + } + if err := d.Set("list_bucket_result", formattedData); err != nil { + return diag.FromErr(fmt.Errorf("error setting data: %v", err)) } + d.SetId(vpcId) - return diag.Errorf("bucket with name %s not found", bucketName) + return nil } diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_cors.go b/fptcloud/object-storage/datasource_object_storage_bucket_cors.go index 6f5460a..4a3f907 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket_cors.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_cors.go @@ -16,21 +16,38 @@ func DataSourceBucketCors() *schema.Resource { "bucket_name": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: "Name of the bucket", }, "vpc_id": { Type: schema.TypeString, Required: true, - ForceNew: true, Description: "The VPC ID", }, - "cors_rule": { - Type: schema.TypeList, + "region_name": { + Type: schema.TypeString, Required: true, - Description: "The bucket cors rule", + ForceNew: true, + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, + "page_size": { + Type: schema.TypeInt, + Optional: true, + Description: "The number of items to return in each page", + }, + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "The page number", + }, + "cors_rule": { + Type: schema.TypeList, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, "allowed_headers": { Type: schema.TypeList, Required: true, @@ -65,6 +82,7 @@ func DataSourceBucketCors() *schema.Resource { }, }, }, + Description: "The bucket cors rule", }, }, } @@ -79,19 +97,38 @@ func dataSourceBucketCorsRead(ctx context.Context, d *schema.ResourceData, m int return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) } bucketName := d.Get("bucket_name").(string) - - corsRule, err := service.GetBucketCors(vpcId, s3ServiceDetail.S3ServiceId, bucketName) + page := 1 + if d.Get("page").(int) > 0 { + page = d.Get("page").(int) + } + pageSize := 25 + if d.Get("page_size").(int) > 0 { + pageSize = d.Get("page_size").(int) + } + corsRule, err := service.GetBucketCors(vpcId, s3ServiceDetail.S3ServiceId, bucketName, page, pageSize) if err != nil { return diag.FromErr(err) } - if corsRule == nil { - d.SetId("") - return nil + if corsRule.Total == 0 { + return diag.Errorf("bucket %s does not have cors rule", bucketName) + } + var formattedData []interface{} + for _, rule := range corsRule.CorsRules { + formattedData = append(formattedData, map[string]interface{}{ + "id": rule.ID, + "allowed_headers": rule.AllowedHeaders, + "allowed_methods": rule.AllowedMethods, + "allowed_origins": rule.AllowedOrigins, + "expose_headers": rule.ExposeHeaders, + "max_age_seconds": rule.MaxAgeSeconds, + }) } - d.SetId(bucketName) - d.Set("cors_rule", corsRule) + if err := d.Set("cors_rule", formattedData); err != nil { + d.SetId("") + return diag.FromErr(err) + } return nil } diff --git 
a/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go b/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go new file mode 100644 index 0000000..5c1ffe3 --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go @@ -0,0 +1,197 @@ +package fptcloud_object_storage + +import ( + "context" + "fmt" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceBucketLifecycle() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceBucketLifecycleRead, + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + Description: "The VPC ID", + }, + "bucket_name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the bucket to fetch policy for", + }, + "region_name": { + Type: schema.TypeString, + Required: true, + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, + "page_size": { + Type: schema.TypeInt, + Optional: true, + Description: "The number of items to return in each page", + }, + "page": { + Type: schema.TypeInt, + Optional: true, + Description: "The page number", + }, + "life_cycle_rules": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "filter": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "prefix": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "prefix": { + Type: schema.TypeString, + Computed: true, + }, + "expiration": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "days": { + Type: schema.TypeInt, + Computed: true, + }, + "expired_object_delete_marker": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "noncurrent_version_expiration": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "noncurrent_days": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + "abort_incomplete_multipart_upload": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "days_after_initiation": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func dataSourceBucketLifecycleRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + + bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" 
{ + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + } + page := 1 + v, ok := d.GetOk("page") + if ok { + page = v.(int) + } + pageSize := 25 + v, ok = d.GetOk("page_size") + if ok { + pageSize = v.(int) + } + + lifeCycleResponse := service.GetBucketLifecycle(vpcId, s3ServiceDetail.S3ServiceId, bucketName, page, pageSize) + if !lifeCycleResponse.Status { + return diag.FromErr(fmt.Errorf("failed to fetch life cycle rules for bucket %s", bucketName)) + } + d.SetId(bucketName) + var formattedData []interface{} + if lifeCycleResponse.Total == 0 { + d.Set("life_cycle_rules", make([]interface{}, 0)) + } + for _, lifecycleRule := range lifeCycleResponse.Rules { + data := map[string]interface{}{ + "id": lifecycleRule.ID, + "status": lifecycleRule.Status, + "noncurrent_version_expiration": []interface{}{ + map[string]interface{}{ + "noncurrent_days": lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays, + }, + }, + "abort_incomplete_multipart_upload": []interface{}{ + map[string]interface{}{ + "days_after_initiation": lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation, + }, + }, + } + // for fully prefix + if lifecycleRule.Prefix == "" { + data["prefix"] = lifecycleRule.Prefix + } + // for filter + if lifecycleRule.Filter.Prefix != "" { + data["filter"] = []interface{}{ + map[string]interface{}{ + "prefix": lifecycleRule.Filter.Prefix, + }, + } + } + if lifecycleRule.Expiration.Days > 0 { + data["expiration"] = []interface{}{ + map[string]interface{}{ + "days": lifecycleRule.Expiration.Days, + }, + } + } + if lifecycleRule.Expiration.ExpiredObjectDeleteMarker { + data["expiration"] = []interface{}{ + map[string]interface{}{ + "expired_object_delete_marker": lifecycleRule.Expiration.ExpiredObjectDeleteMarker, + }, + } + } + formattedData = append(formattedData, data) + } + + if err := d.Set("life_cycle_rules", formattedData); err != nil { + d.SetId("") + return diag.FromErr(err) + } + return nil +} diff --git 
a/fptcloud/object-storage/datasource_object_storage_bucket_policy.go b/fptcloud/object-storage/datasource_object_storage_bucket_policy.go index 6200bdc..b7d158b 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket_policy.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_policy.go @@ -23,6 +23,11 @@ func DataSourceBucketPolicy() *schema.Resource { Required: true, Description: "Name of the bucket to fetch policy for", }, + "region_name": { + Type: schema.TypeString, + Required: true, + Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, "policy": { Type: schema.TypeString, Computed: true, @@ -42,17 +47,18 @@ func dataSourceBucketPolicyRead(ctx context.Context, d *schema.ResourceData, m i if s3ServiceDetail.S3ServiceId == "" { return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) } - policyResponse := service.GetBucketPolicy(vpcId, bucketName, s3ServiceDetail.S3ServiceId) + policyResponse := service.GetBucketPolicy(vpcId, s3ServiceDetail.S3ServiceId, bucketName) if !policyResponse.Status { return diag.Errorf("failed to get bucket policy for bucket %s", bucketName) } - // Set the ID to be a combination of bucket name to ensure unique data source - d.SetId(fmt.Sprintf("bucket_policy_%s", bucketName)) - + // Set the policy field in the schema if err := d.Set("policy", policyResponse.Policy); err != nil { + d.SetId("") return diag.FromErr(err) } + // Set the ID to be a combination of bucket name to ensure unique data source + d.SetId(fmt.Sprintf("bucket_policy_%s", bucketName)) return nil } diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_static_website.go b/fptcloud/object-storage/datasource_object_storage_bucket_static_website.go new file mode 100644 index 0000000..882334a --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage_bucket_static_website.go @@ -0,0 +1,71 @@ 
+package fptcloud_object_storage + +import ( + "context" + "fmt" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceBucketStaticWebsite() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceBucketStaticWebsite, + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + Description: "The VPC ID", + }, + "bucket_name": { + Type: schema.TypeString, + Required: true, + Description: "Name of the bucket to fetch policy for", + }, + "region_name": { + Type: schema.TypeString, + Required: true, + Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, + "index_document_suffix": { + Type: schema.TypeString, + Optional: true, + }, + "error_document_key": { + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func dataSourceBucketStaticWebsite(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + + bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } + + staticWebsiteResponse := service.GetBucketWebsite(vpcId, s3ServiceDetail.S3ServiceId, bucketName) + if !staticWebsiteResponse.Status { + return diag.Errorf("failed to get bucket static website config for bucket %s", bucketName) + } + if staticWebsiteResponse.Config.IndexDocument.Suffix == "" && staticWebsiteResponse.Config.ErrorDocument.Key == "" { + return diag.Errorf("bucket %s does not have static website configuration", bucketName) + } + if err := d.Set("index_document_suffix", 
staticWebsiteResponse.Config.IndexDocument.Suffix); err != nil { + d.SetId("") + return diag.FromErr(err) + } + if err := d.Set("error_document_key", staticWebsiteResponse.Config.ErrorDocument.Key); err != nil { + d.SetId("") + return diag.FromErr(err) + } + d.SetId(bucketName) + return nil +} diff --git a/fptcloud/object-storage/datasource_object_storage_versioning.go b/fptcloud/object-storage/datasource_object_storage_bucket_versioning.go similarity index 76% rename from fptcloud/object-storage/datasource_object_storage_versioning.go rename to fptcloud/object-storage/datasource_object_storage_bucket_versioning.go index 1f73478..e9eb8f7 100644 --- a/fptcloud/object-storage/datasource_object_storage_versioning.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_versioning.go @@ -19,14 +19,14 @@ func DataSourceBucketVersioning() *schema.Resource { ForceNew: true, Description: "Name of the bucket", }, - "vpd_id": { + "vpc_id": { Type: schema.TypeString, Required: true, Description: "The VPC ID", }, "versioning_status": { Type: schema.TypeString, - Required: true, + Optional: true, Description: "Status of the versioning, must be Enabled or Suspended", ForceNew: true, // Marking this field as ForceNew to ensure that the resource is recreated when the value is changed }, @@ -34,7 +34,7 @@ func DataSourceBucketVersioning() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The region name to create the access key", + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", }, }, } @@ -51,13 +51,16 @@ func dataSourceBucketVersioningRead(ctx context.Context, d *schema.ResourceData, } bucketName := d.Get("bucket_name").(string) - versioning := service.GetBucketVersioning(vpcId, bucketName, s3ServiceDetail.S3ServiceId) - if versioning == nil { - return diag.Errorf("failed to get bucket versioning for bucket %s", bucketName) + versioning := service.GetBucketVersioning(vpcId, s3ServiceDetail.S3ServiceId, bucketName) + if !versioning.Status { + return diag.Errorf("Could not get versioning status for bucket %s", bucketName) } + if err := d.Set("versioning_status", versioning.Config); err != nil { + d.SetId("") + return diag.FromErr(err) + } d.SetId(bucketName) - d.Set("versioning_status", versioning.Status) return nil } diff --git a/fptcloud/object-storage/datasource_object_storage_lifecycle.go b/fptcloud/object-storage/datasource_object_storage_lifecycle.go deleted file mode 100644 index 34a883c..0000000 --- a/fptcloud/object-storage/datasource_object_storage_lifecycle.go +++ /dev/null @@ -1,76 +0,0 @@ -package fptcloud_object_storage - -import ( - "context" - "fmt" - common "terraform-provider-fptcloud/commons" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func DataSourceBucketLifecycle() *schema.Resource { - return &schema.Resource{ - ReadContext: dataSourceBucketLifecycle, - Schema: map[string]*schema.Schema{ - "vpc_id": { - Type: schema.TypeString, - Required: true, - Description: "The VPC ID", - }, - "bucket_name": { - Type: schema.TypeString, - Required: true, - Description: "Name of the bucket to fetch policy for", - }, - "policy": { - Type: schema.TypeString, - Computed: true, - Description: "The bucket policy in JSON format", - }, - "region_name": { - Type: schema.TypeString, - Required: true, - Description: "The region name of the bucket", - }, - "page_size": { - Type: schema.TypeString, - Optional: 
true, - Default: "25", - Description: "The number of items to return in each page", - }, - "page": { - Type: schema.TypeString, - Optional: true, - Default: "1", - Description: "The page number", - }, - }, - } -} - -func dataSourceBucketLifecycle(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - client := m.(*common.Client) - service := NewObjectStorageService(client) - - bucketName := d.Get("bucket_name").(string) - vpcId := d.Get("vpc_id").(string) - s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) - if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) - } - page := d.Get("page").(string) - pageSize := d.Get("page_size").(string) - - lifeCycleResponse, err := service.GetBucketLifecycle(vpcId, s3ServiceDetail.S3ServiceId, bucketName, page, pageSize) - if err != nil { - return diag.FromErr(err) - } - - d.SetId(fmt.Sprintf("%s-%s", vpcId, bucketName)) - if err := d.Set("policy", lifeCycleResponse.Rules); err != nil { - return diag.FromErr(err) - } - - return nil -} diff --git a/fptcloud/object-storage/datasource_object_storage_region.go b/fptcloud/object-storage/datasource_object_storage_region.go new file mode 100644 index 0000000..60e12d6 --- /dev/null +++ b/fptcloud/object-storage/datasource_object_storage_region.go @@ -0,0 +1,73 @@ +package fptcloud_object_storage + +import ( + "context" + "fmt" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceS3ServiceEnableResponse() *schema.Resource { + return &schema.Resource{ + ReadContext: resourceS3ServiceEnableResponseRead, + Schema: map[string]*schema.Schema{ + "s3_enable_services": { + Type: schema.TypeList, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "s3_service_name": { + Type: schema.TypeString, + Required: true, + 
Description: "The name of the S3 service also known as region_name, could be used to create/delete another resources", + }, + "s3_service_id": { + Type: schema.TypeString, + Required: true, + }, + "s3_platform": { + Type: schema.TypeString, + Required: true, + Description: "The platform of the S3 service", + }, + }}, + Computed: true, + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + Description: "The ID of the VPC", + }, + }, + } +} + +func resourceS3ServiceEnableResponseRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + listRegion := service.CheckServiceEnable(vpcId) + if listRegion.Total == 0 { + return diag.FromErr(fmt.Errorf("no region is enabled")) + } + var formattedData []interface{} + + for _, item := range listRegion.Data { + formattedData = append(formattedData, map[string]interface{}{ + "s3_service_name": item.S3ServiceName, + "s3_service_id": item.S3ServiceID, + "s3_platform": item.S3Platform, + }) + } + if listRegion.Data == nil { + return diag.FromErr(fmt.Errorf("failed to get response from API")) + } + if listRegion.Total == 0 { + return diag.FromErr(fmt.Errorf("no region is enabled")) + } + if err := d.Set("s3_enable_services", formattedData); err != nil { + return diag.FromErr(fmt.Errorf("error setting data: %v", err)) + } + d.SetId(vpcId) + return nil +} diff --git a/fptcloud/object-storage/datasource_object_storage_static_website.go b/fptcloud/object-storage/datasource_object_storage_static_website.go deleted file mode 100644 index d227fa5..0000000 --- a/fptcloud/object-storage/datasource_object_storage_static_website.go +++ /dev/null @@ -1,144 +0,0 @@ -package fptcloud_object_storage - -import ( - "context" - "fmt" - common "terraform-provider-fptcloud/commons" - - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - 
-func DataSourceBucketStaticWebsite() *schema.Resource { - return &schema.Resource{ - ReadContext: dataSourceBucketStaticWebsite, - Schema: map[string]*schema.Schema{ - "vpc_id": { - Type: schema.TypeString, - Required: true, - Description: "The VPC ID", - }, - "bucket_name": { - Type: schema.TypeString, - Required: true, - Description: "Name of the bucket to fetch policy for", - }, - "region_name": { - Type: schema.TypeString, - Required: true, - Description: "The region name of the bucket", - }, - "status": { - Type: schema.TypeBool, - Computed: true, - Description: "Status of the bucket website configuration", - }, - "request_id": { - Type: schema.TypeString, - Computed: true, - Description: "Request ID of the operation", - }, - "host_id": { - Type: schema.TypeString, - Computed: true, - Description: "Host ID of the operation", - }, - "http_status_code": { - Type: schema.TypeInt, - Computed: true, - Description: "HTTP status code of the operation", - }, - "http_headers": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Description: "HTTP headers of the response", - }, - "retry_attempts": { - Type: schema.TypeInt, - Computed: true, - Description: "Number of retry attempts", - }, - "index_document": { - Type: schema.TypeString, - Computed: true, - Description: "Suffix for index document", - ForceNew: true, - }, - "error_document": { - Type: schema.TypeString, - Computed: true, - Description: "Key for error document", - ForceNew: true, - }, - }, - } -} - -func dataSourceBucketStaticWebsite(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - client := m.(*common.Client) - service := NewObjectStorageService(client) - - bucketName := d.Get("bucket_name").(string) - vpcId := d.Get("vpc_id").(string) - s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) - if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", 
d.Get("region_name").(string))) - } - - staticWebsiteResponse := service.GetBucketWebsite(vpcId, bucketName, s3ServiceDetail.S3ServiceId) - if !staticWebsiteResponse.Status { - return diag.Errorf("failed to get bucket static website config for bucket %s", bucketName) - } - - // Set the computed values - if err := d.Set("status", staticWebsiteResponse.Status); err != nil { - return diag.FromErr(err) - } - - if staticWebsiteResponse.Config.ResponseMetadata.RequestID != "" { - if err := d.Set("request_id", staticWebsiteResponse.Config.ResponseMetadata.RequestID); err != nil { - return diag.FromErr(err) - } - } - - if staticWebsiteResponse.Config.ResponseMetadata.HostID != "" { - if err := d.Set("host_id", staticWebsiteResponse.Config.ResponseMetadata.HostID); err != nil { - return diag.FromErr(err) - } - } - - if err := d.Set("http_status_code", staticWebsiteResponse.Config.ResponseMetadata.HTTPStatusCode); err != nil { - return diag.FromErr(err) - } - - headers := map[string]string{ - "x-amz-request-id": staticWebsiteResponse.Config.ResponseMetadata.HTTPHeaders.XAmzRequestID, - "content-type": staticWebsiteResponse.Config.ResponseMetadata.HTTPHeaders.ContentType, - "content-length": staticWebsiteResponse.Config.ResponseMetadata.HTTPHeaders.ContentLength, - "date": staticWebsiteResponse.Config.ResponseMetadata.HTTPHeaders.Date, - } - if err := d.Set("http_headers", headers); err != nil { - return diag.FromErr(err) - } - - if err := d.Set("retry_attempts", staticWebsiteResponse.Config.ResponseMetadata.RetryAttempts); err != nil { - return diag.FromErr(err) - } - - if staticWebsiteResponse.Config.IndexDocument.Suffix != "" { - if err := d.Set("index_document", staticWebsiteResponse.Config.IndexDocument.Suffix); err != nil { - return diag.FromErr(err) - } - } - - if staticWebsiteResponse.Config.ErrorDocument.Key != "" { - if err := d.Set("error_document", staticWebsiteResponse.Config.ErrorDocument.Key); err != nil { - return diag.FromErr(err) - } - } - - return nil -} 
diff --git a/fptcloud/object-storage/datasource_object_storage_sub_user.go b/fptcloud/object-storage/datasource_object_storage_sub_user.go index 88560ec..ad32393 100644 --- a/fptcloud/object-storage/datasource_object_storage_sub_user.go +++ b/fptcloud/object-storage/datasource_object_storage_sub_user.go @@ -9,23 +9,11 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -// datasource_object_storage_sub_user.go func DataSourceSubUser() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceSubUserRead, Schema: map[string]*schema.Schema{ - "role": { - Type: schema.TypeString, - Required: true, - Description: "Role of the sub-user, should be one of the following: SubUserNone, SubUserRead, SubUserReadWrite, SubUserWrite, SubUserFull", - }, - "user_id": { - Type: schema.TypeString, - Description: "ID of the sub-user", - ForceNew: true, - Required: true, - }, - "vpd_id": { + "vpc_id": { Type: schema.TypeString, Required: true, Description: "The VPC ID", @@ -34,11 +22,22 @@ func DataSourceSubUser() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The region name of sub-user", + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, + "page": { + Optional: true, + Type: schema.TypeInt, + Description: "Page number", + }, + "page_size": { + Optional: true, + Type: schema.TypeInt, + Description: "Number of items per page", }, "list_sub_user": { - Type: schema.TypeList, - Computed: true, + Type: schema.TypeList, + Computed: true, + Description: "List of sub-users", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "user_id": { @@ -49,6 +48,14 @@ func DataSourceSubUser() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "active": { + Type: schema.TypeBool, + Computed: true, + }, + "arn": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, @@ -60,25 +67,40 @@ func dataSourceSubUserRead(ctx context.Context, d *schema.ResourceData, m interf client := m.(*common.Client) service := NewObjectStorageService(client) vpcId := d.Get("vpc_id").(string) - s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + } + page := 1 + pageSize := 100 + if d.Get("page").(int) > 0 { + page = d.Get("page").(int) + } + if d.Get("page_size").(int) > 0 { + pageSize = d.Get("page_size").(int) } - subUsers, err := service.ListSubUsers(vpcId, s3ServiceDetail.S3ServiceId) + subUsers, err := service.ListSubUsers(vpcId, s3ServiceDetail.S3ServiceId, page, pageSize) if err != nil { return diag.FromErr(err) } + if subUsers.Total == 0 { + return diag.FromErr(fmt.Errorf("no sub-user found")) + } + var formattedData []interface{} + for _, subUser := range subUsers.SubUsers { + formattedData = append(formattedData, map[string]interface{}{ + "user_id": subUser.UserID, + "role": subUser.Role, + 
"active": subUser.Active, + "arn": subUser.Arn, + }) + } + if err := d.Set("list_sub_user", formattedData); err != nil { + return diag.FromErr(fmt.Errorf("error setting list_sub_user: %s", err)) + } + d.SetId(vpcId) - role := d.Get("role").(string) - fmt.Println("subUsers: ", subUsers) - // for _, user := range subUsers { - // if user.Role == role { - // d.SetId(user.UserId) - // d.Set("user_id", user.UserId) - // return nil - // } - // } - - return diag.Errorf("sub-user with role %s not found", role) + return nil } diff --git a/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go b/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go index 9a223f2..9660399 100644 --- a/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go +++ b/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go @@ -3,7 +3,6 @@ package fptcloud_object_storage import ( "context" "fmt" - "reflect" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -68,7 +67,7 @@ func dataSourceSubUserDetailRead(ctx context.Context, d *schema.ResourceData, m subUserId := d.Get("user_id").(string) subUser := objectStorageService.DetailSubUser(vpcId, s3ServiceDetail.S3ServiceId, subUserId) - if subUser == nil { + if subUser.UserID == "" { return diag.Errorf("sub-user with ID %s not found", subUserId) } @@ -77,8 +76,7 @@ func dataSourceSubUserDetailRead(ctx context.Context, d *schema.ResourceData, m if subUser.Arn != nil { d.Set("arn", subUser.Arn) } - fmt.Println("subUser active is: ", subUser.Active) - fmt.Println("reflect subUser active is: ", reflect.TypeOf(subUser.Active)) + d.Set("active", subUser.Active) d.Set("role", subUser.Role) if subUser.CreatedAt != nil { d.Set("created_at", subUser.CreatedAt) diff --git a/fptcloud/object-storage/resource_access_key.go b/fptcloud/object-storage/resource_access_key.go index eb5ab4d..85b8189 100644 --- a/fptcloud/object-storage/resource_access_key.go +++ 
b/fptcloud/object-storage/resource_access_key.go @@ -39,7 +39,7 @@ func ResourceAccessKey() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The region name to create the access key", + Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", }, "status": { Type: schema.TypeBool, @@ -72,7 +72,7 @@ func resourceAccessKeyCreate(ctx context.Context, d *schema.ResourceData, m inte resp := service.CreateAccessKey(vpcId, s3ServiceDetail.S3ServiceId) if !resp.Status { - return diag.Errorf(resp.Message) + return diag.Errorf("failed to create access key: %s", resp.Message) } if resp.Credential.AccessKey != "" { @@ -124,9 +124,6 @@ func resourceAccessKeyDelete(ctx context.Context, d *schema.ResourceData, m inte accessKeyId = d.Get("access_key_id").(string) } - log.Printf("[DEBUG] Starting deletion of access key. VPC ID: %s, Region: %s, Access Key ID: %s", - vpcId, regionName, accessKeyId) - s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { log.Printf("[ERROR] Region %s is not enabled for VPC %s", regionName, vpcId) @@ -140,16 +137,12 @@ func resourceAccessKeyDelete(ctx context.Context, d *schema.ResourceData, m inte return diag.Errorf("access_key_id is required for deletion") } - log.Printf("[INFO] Attempting to delete access key %s for VPC %s in region %s", - accessKeyId, vpcId, regionName) - err := service.DeleteAccessKey(vpcId, s3ServiceDetail.S3ServiceId, accessKeyId) if err != nil { log.Printf("[ERROR] Failed to delete access key %s: %v", accessKeyId, err) return diag.FromErr(err) } - - log.Printf("[INFO] Successfully deleted access key %s", accessKeyId) + d.Set("status", true) d.SetId("") return nil } diff --git a/fptcloud/object-storage/resource_bucket.go b/fptcloud/object-storage/resource_bucket.go index 3527e6e..20d9695 100644 --- a/fptcloud/object-storage/resource_bucket.go
+++ b/fptcloud/object-storage/resource_bucket.go @@ -38,7 +38,7 @@ func ResourceBucket() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The region name that's are the same with the region name in the S3 service.", + Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", }, "acl": { Type: schema.TypeString, diff --git a/fptcloud/object-storage/resource_bucket_acl.go b/fptcloud/object-storage/resource_bucket_acl.go index e6f3cc4..6636813 100644 --- a/fptcloud/object-storage/resource_bucket_acl.go +++ b/fptcloud/object-storage/resource_bucket_acl.go @@ -32,7 +32,7 @@ func ResourceBucketAcl() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The region name where the bucket is located, e.g., HCM-02, can be retrieved when creating the bucket", + Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", }, "canned_acl": { Type: schema.TypeString, @@ -65,7 +65,6 @@ func resourceBucketAclCreate(ctx context.Context, d *schema.ResourceData, m inte regionName := d.Get("region_name").(string) cannedAcl := d.Get("canned_acl").(string) applyObjects := d.Get("apply_objects").(bool) - fmt.Println("applyObjects", applyObjects) if cannedAcl != "private" && cannedAcl != "public-read" { return diag.Errorf("canned_acl must be either private or public-read, got %s", cannedAcl) } @@ -108,6 +107,5 @@ func resourceBucketAclRead(ctx context.Context, d *schema.ResourceData, m interf func resourceBucketAclDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { // Remove the resource from the state d.SetId("") - fmt.Println("Delete operation is not supported for bucket ACLs. This is a no-op.") - return nil + return diag.Errorf("Delete operation is not supported for bucket ACLs. 
This is a no-op.") } diff --git a/fptcloud/object-storage/resource_bucket_cors.go b/fptcloud/object-storage/resource_bucket_cors.go index 7b00a03..af36d19 100644 --- a/fptcloud/object-storage/resource_bucket_cors.go +++ b/fptcloud/object-storage/resource_bucket_cors.go @@ -2,19 +2,21 @@ package fptcloud_object_storage import ( "context" + "encoding/json" "fmt" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func ResourceBucketCors() *schema.Resource { return &schema.Resource{ CreateContext: resourceBucketCorsCreate, - UpdateContext: resourceBucketCorsUpdate, + UpdateContext: nil, DeleteContext: resourceBucketCorsDelete, - ReadContext: dataSourceBucketCorsRead, + ReadContext: resourceBucketCorsRead, Schema: map[string]*schema.Schema{ "bucket_name": { Type: schema.TypeString, @@ -27,43 +29,41 @@ func ResourceBucketCors() *schema.Resource { Required: true, ForceNew: true, Description: "The VPC ID", - }, "cors_rule": { - Type: schema.TypeList, + }, + "region_name": { + Type: schema.TypeString, Required: true, - Description: "The bucket cors rule", + ForceNew: true, + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, + "cors_config": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The bucket CORS rule in JSON format, support only one rule", + ConflictsWith: []string{"cors_config_file"}, + ValidateFunc: validation.StringIsJSON, + }, + "cors_config_file": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Path to the JSON file containing the bucket CORS rule, support only one rule", + ConflictsWith: []string{"cors_config"}, + }, + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "Status after bucket cors rule is created", + }, + "bucket_cors_rules": { + Type: schema.TypeList, + Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "allowed_headers": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "allowed_methods": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "allowed_origins": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "expose_headers": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "max_age_seconds": { - Type: schema.TypeInt, - Optional: true, + "id": { + Type: schema.TypeString, + Computed: true, }, }, }, @@ -76,78 +76,134 @@ func resourceBucketCorsCreate(ctx context.Context, d *schema.ResourceData, m int client := m.(*common.Client) service := NewObjectStorageService(client) vpcId := d.Get("vpc_id").(string) - s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + bucketName := d.Get("bucket_name").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled",
d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) } - bucketName := d.Get("bucket_name").(string) - corsRule := d.Get("cors_rule").([]interface{}) - - cors := make([]CorsRule, 0) - for _, rule := range corsRule { - r := rule.(map[string]interface{}) - cors = append(cors, CorsRule{ - AllowedHeaders: r["allowed_headers"].([]string), - AllowedMethods: r["allowed_methods"].([]string), - AllowedOrigins: r["allowed_origins"].([]string), - ExposeHeaders: r["expose_headers"].([]string), - MaxAgeSeconds: r["max_age_seconds"].(int), - ID: "", // should implement later - }) + var corsConfigData string + if v, ok := d.GetOk("cors_config"); ok { + corsConfigData = v.(string) + } else if v, ok := d.GetOk("cors_config_file"); ok { + // The actual file reading is handled by Terraform's built-in file() function + // in the configuration, so we just get the content here + corsConfigData = v.(string) + } else { + return diag.FromErr(fmt.Errorf("either 'cors_config' or 'cors_config_file' must be specified")) } - - _, err := service.PutBucketCors(bucketName, vpcId, s3ServiceDetail.S3ServiceId, CorsRule{ - AllowedHeaders: cors[0].AllowedHeaders, - AllowedMethods: cors[0].AllowedMethods, - AllowedOrigins: cors[0].AllowedOrigins, - ExposeHeaders: cors[0].ExposeHeaders, - }) + var jsonMap CorsRule + err := json.Unmarshal([]byte(corsConfigData), &jsonMap) if err != nil { - return diag.Errorf("failed to create bucket cors for bucket %s", bucketName) + return diag.FromErr(err) + } + payload := map[string]interface{}{ + "ID": jsonMap.ID, + "AllowedMethods": jsonMap.AllowedMethods, + "AllowedOrigins": jsonMap.AllowedOrigins, + "MaxAgeSeconds": jsonMap.MaxAgeSeconds, + } + if len(jsonMap.AllowedHeaders) > 0 { + payload["AllowedHeaders"] = jsonMap.AllowedHeaders + } + if len(jsonMap.ExposeHeaders) > 0 { + payload["ExposeHeaders"] = jsonMap.ExposeHeaders + } + r := service.CreateBucketCors(vpcId, s3ServiceDetail.S3ServiceId, bucketName, 
payload) + if !r.Status { + d.Set("status", false) + return diag.FromErr(fmt.Errorf("%s", r.Message)) } - d.SetId(bucketName) + if err := d.Set("status", true); err != nil { + return diag.FromErr(err) + } return nil } - -func resourceBucketCorsUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { +func resourceBucketCorsRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { client := m.(*common.Client) service := NewObjectStorageService(client) - bucketName := d.Get("bucket_name").(string) vpcId := d.Get("vpc_id").(string) - s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) } + page := 1 + pageSize := 999999 - corsRule := d.Get("cors_rule").([]interface{}) - - cors := make([]CorsRule, 0) - for _, rule := range corsRule { - r := rule.(map[string]interface{}) - cors = append(cors, CorsRule{ - AllowedHeaders: r["allowed_headers"].([]string), - AllowedMethods: r["allowed_methods"].([]string), - AllowedOrigins: r["allowed_origins"].([]string), - ExposeHeaders: r["expose_headers"].([]string), - MaxAgeSeconds: r["max_age_seconds"].(int), - ID: "random-string-id", // should implement later - }) - } - - _, err := service.UpdateBucketCors(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketCors{ - CorsRules: cors, - }) - if err != nil { - return diag.Errorf("failed to update bucket cors for bucket %s", bucketName) + bucketCorsDetails, _ := service.GetBucketCors(vpcId, s3ServiceDetail.S3ServiceId, bucketName, page, pageSize) + if !bucketCorsDetails.Status { + return diag.FromErr(fmt.Errorf("failed to fetch life cycle rules for bucket %s", bucketName)) } - 
d.SetId(bucketName) + var formattedData []interface{} + if bucketCorsDetails.Total == 0 { + d.Set("bucket_cors_rules", make([]interface{}, 0)) + } + for _, corsRuleDetail := range bucketCorsDetails.CorsRules { + data := map[string]interface{}{ + "id": corsRuleDetail.ID, + } + formattedData = append(formattedData, data) + } + if err := d.Set("bucket_cors_rules", formattedData); err != nil { + d.SetId("") + return diag.FromErr(err) + } return nil } func resourceBucketCorsDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - return resourceBucketCorsUpdate(ctx, d, m) + client := m.(*common.Client) + service := NewObjectStorageService(client) + bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + } + var corsConfigData string + if v, ok := d.GetOk("cors_config"); ok { + corsConfigData = v.(string) + } else if v, ok := d.GetOk("cors_config_file"); ok { + // The actual file reading is handled by Terraform's built-in file() function + // in the configuration, so we just get the content here + corsConfigData = v.(string) + } else { + return diag.FromErr(fmt.Errorf("either 'cors_config' or 'cors_config_file' must be specified")) + } + var jsonMap []CorsRule + err := json.Unmarshal([]byte(corsConfigData), &jsonMap) + if err != nil { + return diag.FromErr(err) + } + var payload []map[string]interface{} + for _, corsRule := range jsonMap { + payload := map[string]interface{}{ + "ID": corsRule.ID, + "AllowedMethods": corsRule.AllowedMethods, + "AllowedOrigins": corsRule.AllowedOrigins, + "MaxAgeSeconds": corsRule.MaxAgeSeconds, + } + if len(corsRule.AllowedHeaders) > 0 { + payload["AllowedHeaders"] = corsRule.AllowedHeaders + } + if len(corsRule.ExposeHeaders) > 0 { + 
payload["ExposeHeaders"] = corsRule.ExposeHeaders + } + } + r := service.UpdateBucketCors(vpcId, s3ServiceDetail.S3ServiceId, bucketName, payload) + if !r.Status { + d.Set("status", false) + return diag.FromErr(fmt.Errorf("%s", r.Message)) + } + d.SetId(bucketName) + if err := d.Set("status", true); err != nil { + return diag.FromErr(err) + } + return nil } diff --git a/fptcloud/object-storage/resource_bucket_lifecycle.go b/fptcloud/object-storage/resource_bucket_lifecycle.go new file mode 100644 index 0000000..21db918 --- /dev/null +++ b/fptcloud/object-storage/resource_bucket_lifecycle.go @@ -0,0 +1,215 @@ +package fptcloud_object_storage + +import ( + "context" + "encoding/json" + "fmt" + common "terraform-provider-fptcloud/commons" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func ResourceBucketLifeCycle() *schema.Resource { + return &schema.Resource{ + CreateContext: resourceBucketLifeCycleCreate, + UpdateContext: nil, + DeleteContext: resourceBucketLifeCycleDelete, + ReadContext: resourceBucketLifeCycleRead, + Schema: map[string]*schema.Schema{ + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC ID", + }, + "bucket_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the bucket", + }, + "region_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, + "life_cycle_rule": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The bucket lifecycle rule in JSON format, support only one rule", + ConflictsWith: []string{"life_cycle_rule_file"}, + ValidateFunc: validation.StringIsJSON, + }, + "life_cycle_rule_file": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Path to the JSON file containing the bucket lifecycle rule, support only one rule", + ConflictsWith: []string{"life_cycle_rule"}, + }, + "state": { + Type: schema.TypeBool, + Computed: true, + Description: "State after bucket lifecycle rule is created", + }, + "rules": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func resourceBucketLifeCycleCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + bucketName := d.Get("bucket_name").(string) + regionName := d.Get("region_name").(string) + vpcId := d.Get("vpc_id").(string) + + var lifecycleRuleContent string + if v, ok := d.GetOk("life_cycle_rule"); ok { + lifecycleRuleContent = v.(string) + } else if v, ok := d.GetOk("life_cycle_rule_file"); ok { + // The actual file reading is handled by Terraform's built-in file() function + // in the configuration, so we just get the content here + lifecycleRuleContent = v.(string) + } else { + return diag.FromErr(fmt.Errorf("either 'life_cycle_rule' or 'life_cycle_rule_file' must be specified")) + } + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + } + var jsonMap S3BucketLifecycleConfig + err := json.Unmarshal([]byte(lifecycleRuleContent), &jsonMap) + if err != nil { 
+ return diag.FromErr(err) + } + payload := map[string]interface{}{ + "ID": jsonMap.ID, + "Filter": map[string]interface{}{"Prefix": jsonMap.Filter.Prefix}, + "NoncurrentVersionExpiration": map[string]interface{}{"NoncurrentDays": jsonMap.NoncurrentVersionExpiration.NoncurrentDays}, + "AbortIncompleteMultipartUpload": map[string]interface{}{"DaysAfterInitiation": jsonMap.AbortIncompleteMultipartUpload.DaysAfterInitiation}, + } + if jsonMap.Expiration.Days != 0 && jsonMap.Expiration.ExpiredObjectDeleteMarker { + return diag.FromErr(fmt.Errorf("Expiration.Days and Expiration.ExpiredObjectDeleteMarker cannot be set at the same time")) + } + if jsonMap.Expiration.Days != 0 { + payload["Expiration"] = map[string]interface{}{"Days": jsonMap.Expiration.Days} + } + if jsonMap.Expiration.ExpiredObjectDeleteMarker { + payload["Expiration"] = map[string]interface{}{"ExpiredObjectDeleteMarker": jsonMap.Expiration.ExpiredObjectDeleteMarker} + } + r := service.PutBucketLifecycle(vpcId, s3ServiceDetail.S3ServiceId, bucketName, payload) + if !r.Status { + d.Set("state", false) + return diag.FromErr(fmt.Errorf("%s", r.Message)) + } + d.SetId(bucketName) + if err := d.Set("state", true); err != nil { + return diag.FromErr(err) + } + + return nil +} +func resourceBucketLifeCycleRead(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + } + page := 1 + pageSize := 999999 + + lifeCycleResponse := service.GetBucketLifecycle(vpcId, s3ServiceDetail.S3ServiceId, bucketName, page, pageSize) + if !lifeCycleResponse.Status { + return diag.FromErr(fmt.Errorf("failed to fetch life cycle 
rules for bucket %s", bucketName)) + } + d.SetId(bucketName) + var formattedData []interface{} + if lifeCycleResponse.Total == 0 { + d.Set("life_cycle_rules", make([]interface{}, 0)) + } + for _, lifecycleRule := range lifeCycleResponse.Rules { + data := map[string]interface{}{ + "id": lifecycleRule.ID, + } + formattedData = append(formattedData, data) + } + + if err := d.Set("rules", formattedData); err != nil { + d.SetId("") + return diag.FromErr(err) + } + return nil +} +func resourceBucketLifeCycleDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + } + var lifecycleRuleContent string + if v, ok := d.GetOk("life_cycle_rule"); ok { + lifecycleRuleContent = v.(string) + } else if v, ok := d.GetOk("life_cycle_rule_file"); ok { + // The actual file reading is handled by Terraform's built-in file() function + // in the configuration, so we just get the content here + lifecycleRuleContent = v.(string) + } else { + return diag.FromErr(fmt.Errorf("either 'life_cycle_rule' or 'life_cycle_rule_file' must be specified")) + } + var jsonMap S3BucketLifecycleConfig + err := json.Unmarshal([]byte(lifecycleRuleContent), &jsonMap) + if err != nil { + return diag.FromErr(err) + } + payload := map[string]interface{}{ + "ID": jsonMap.ID, + "Filter": map[string]interface{}{"Prefix": jsonMap.Filter.Prefix}, + "NoncurrentVersionExpiration": map[string]interface{}{"NoncurrentDays": jsonMap.NoncurrentVersionExpiration.NoncurrentDays}, + "AbortIncompleteMultipartUpload": map[string]interface{}{"DaysAfterInitiation": 
jsonMap.AbortIncompleteMultipartUpload.DaysAfterInitiation}, + "OrgID": jsonMap.ID, + "Status": "Enabled", + } + if jsonMap.Expiration.Days != 0 && jsonMap.Expiration.ExpiredObjectDeleteMarker { + return diag.FromErr(fmt.Errorf("Expiration.Days and Expiration.ExpiredObjectDeleteMarker cannot be set at the same time")) + } + if jsonMap.Expiration.Days != 0 { + payload["Expiration"] = map[string]interface{}{"Days": jsonMap.Expiration.Days} + } + if jsonMap.Expiration.ExpiredObjectDeleteMarker { + payload["Expiration"] = map[string]interface{}{"ExpiredObjectDeleteMarker": jsonMap.Expiration.ExpiredObjectDeleteMarker} + } + r := service.DeleteBucketLifecycle(vpcId, s3ServiceDetail.S3ServiceId, bucketName, payload) + if !r.Status { + d.Set("state", false) + return diag.FromErr(fmt.Errorf("%s", r.Message)) + } + d.SetId(bucketName) + if err := d.Set("state", true); err != nil { + return diag.FromErr(err) + } + return resourceBucketLifeCycleRead(ctx, d, m) +} diff --git a/fptcloud/object-storage/resource_bucket_policy.go b/fptcloud/object-storage/resource_bucket_policy.go index 0755538..b1d257d 100644 --- a/fptcloud/object-storage/resource_bucket_policy.go +++ b/fptcloud/object-storage/resource_bucket_policy.go @@ -2,42 +2,59 @@ package fptcloud_object_storage import ( "context" + "encoding/json" "fmt" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) func ResourceBucketPolicy() *schema.Resource { return &schema.Resource{ CreateContext: resourceBucketPolicyCreate, - UpdateContext: resourceBucketPolicyUpdate, + UpdateContext: nil, DeleteContext: resourceBucketPolicyDelete, ReadContext: dataSourceBucketPolicyRead, Schema: map[string]*schema.Schema{ - "bucket_name": { + "vpc_id": { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "Name of the bucket", - }, - "policy": { - Type: 
schema.TypeString, - Required: true, - Description: "The bucket policy in JSON format", + Description: "The VPC ID", }, "region_name": { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The region name of the bucket", + Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", }, - "vpc_id": { + "bucket_name": { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The VPC ID", + Description: "Name of the bucket", + }, + "policy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The bucket policy in JSON format", + ConflictsWith: []string{"policy_file"}, + ValidateFunc: validation.StringIsJSON, + }, + "policy_file": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Path to the JSON file containing the bucket policy", + ConflictsWith: []string{"policy"}, + }, + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "Status after bucket policy is created", }, }, } @@ -48,28 +65,47 @@ func resourceBucketPolicyCreate(ctx context.Context, d *schema.ResourceData, m i service := NewObjectStorageService(client) bucketName := d.Get("bucket_name").(string) - policy := d.Get("policy").(string) vpcId := d.Get("vpc_id").(string) regionName := d.Get("region_name").(string) + + // Get policy content either from policy or policy_file + var policyContent string + if v, ok := d.GetOk("policy"); ok { + policyContent = v.(string) + } else if v, ok := d.GetOk("policy_file"); ok { + // The actual file reading is handled by Terraform's built-in file() function + // in the configuration, so we just get the content here + policyContent = v.(string) + } else { + return diag.FromErr(fmt.Errorf("either 'policy' or 'policy_file' must be specified")) + } + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return 
diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) } - - resp := service.PutBucketPolicy(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketPolicyRequest{ - Policy: policy, - }) + var jsonMap map[string]interface{} + err := json.Unmarshal([]byte(policyContent), &jsonMap) + if err != nil { + return diag.FromErr(err) + } + // Reverse from string into json object for matching with the API request + payload := map[string]interface{}{ + "policy": jsonMap, + } + resp := service.PutBucketPolicy(vpcId, s3ServiceDetail.S3ServiceId, bucketName, payload) if !resp.Status { - return diag.Errorf("failed to create bucket policy for bucket %s", bucketName) + d.Set("status", false) + return diag.Errorf(fmt.Sprintf("Error create bucket policy: %s", resp.Message)) } - d.SetId(bucketName) - return nil -} + if err := d.Set("status", true); err != nil { + d.SetId("") + return diag.FromErr(err) + } -func resourceBucketPolicyUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - return resourceBucketPolicyCreate(ctx, d, m) + return nil } func resourceBucketPolicyDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { diff --git a/fptcloud/object-storage/resource_bucket_static_website.go b/fptcloud/object-storage/resource_bucket_static_website.go index c64953a..6636c9f 100644 --- a/fptcloud/object-storage/resource_bucket_static_website.go +++ b/fptcloud/object-storage/resource_bucket_static_website.go @@ -25,7 +25,7 @@ func ResourceBucketStaticWebsite() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The region name of the bucket", + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", }, "vpc_id": { Type: schema.TypeString, @@ -80,9 +80,6 @@ func resourceBucketStaticWebsiteCreate(ctx context.Context, d *schema.ResourceDa Suffix: indexDocument, Key: errorDocument, }) - fmt.Println("--------------------------------------- \n:") - fmt.Println("--------------------------------------- \n: ", putBucketWebsite) - fmt.Println("--------------------------------------- \n: ") if !putBucketWebsite.Status { diag.Errorf("failed to create bucket website for bucket %s", bucketName) @@ -91,7 +88,7 @@ func resourceBucketStaticWebsiteCreate(ctx context.Context, d *schema.ResourceDa } d.Set("status", true) d.SetId(bucketName) - return dataSourceBucketStaticWebsite(ctx, d, m) + return nil } func resourceDeleteBucketStaticWebsite(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { @@ -110,6 +107,7 @@ func resourceDeleteBucketStaticWebsite(ctx context.Context, d *schema.ResourceDa if !resp.Status { return diag.Errorf("failed to delete bucket website for bucket %s", bucketName) } + d.SetId("") return nil } diff --git a/fptcloud/object-storage/resource_bucket_versioning.go b/fptcloud/object-storage/resource_bucket_versioning.go index d388099..dd671e4 100644 --- a/fptcloud/object-storage/resource_bucket_versioning.go +++ b/fptcloud/object-storage/resource_bucket_versioning.go @@ -31,7 +31,7 @@ func ResourceBucketVersioning() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The region name of the bucket", + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", }, "vpc_id": { Type: schema.TypeString, diff --git a/fptcloud/object-storage/resource_sub_user.go b/fptcloud/object-storage/resource_sub_user.go index d4e5cfc..d2231d1 100644 --- a/fptcloud/object-storage/resource_sub_user.go +++ b/fptcloud/object-storage/resource_sub_user.go @@ -31,9 +31,10 @@ func ResourceSubUser() *schema.Resource { ForceNew: true, }, "region_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", }, }, } @@ -48,20 +49,39 @@ func resourceSubUserCreate(ctx context.Context, d *schema.ResourceData, m interf Role: d.Get("role").(string), UserId: subUserId, } + s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) if s3ServiceDetail.S3ServiceId == "" { return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) } - subUser := objectStorageService.CreateSubUser(req, vpcId, s3ServiceDetail.S3ServiceId) - if !subUser.Status { - return diag.FromErr(fmt.Errorf(subUser.Message)) + err := objectStorageService.CreateSubUser(req, vpcId, s3ServiceDetail.S3ServiceId) + if !err.Status { + return diag.FromErr(fmt.Errorf("error creating sub-user: %s", err.Message)) } + // Set the resource ID after successful creation d.SetId(subUserId) - return dataSourceSubUserDetailRead(ctx, d, m) -} + d.Set("user_id", subUserId) + return nil +} +func readDetailSubUserOnly(ctx context.Context, d *schema.ResourceData, m interface{}, subUserId string) diag.Diagnostics { + client := m.(*common.Client) + objectStorageService := NewObjectStorageService(client) + vpcId := d.Get("vpc_id").(string) + s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { 
+ return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } + subUser := objectStorageService.DetailSubUser(subUserId, vpcId, s3ServiceDetail.S3ServiceId) + if subUser == nil { + d.SetId("") + return nil + } + d.SetId(subUserId) + return nil +} func resourceSubUserDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { client := m.(*common.Client) objectStorageService := NewObjectStorageService(client) diff --git a/fptcloud/object-storage/resource_sub_user_keys.go b/fptcloud/object-storage/resource_sub_user_keys.go index 7e157e7..36ae2b1 100644 --- a/fptcloud/object-storage/resource_sub_user_keys.go +++ b/fptcloud/object-storage/resource_sub_user_keys.go @@ -2,6 +2,7 @@ package fptcloud_object_storage import ( "context" + "fmt" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -11,23 +12,36 @@ import ( func ResourceSubUserKeys() *schema.Resource { return &schema.Resource{ CreateContext: resourceSubUserAccessKeyCreate, - ReadContext: dataSourceSubUserRead, + ReadContext: resourceReadUserDetail, DeleteContext: resourceSubUserAccessKeyDelete, Schema: map[string]*schema.Schema{ - "user_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, "vpc_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC id that the S3 service belongs to", }, "region_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, + "user_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The sub user id, can retrieve from data source `fptcloud_object_storage_sub_user`", + }, + "access_key": { + Type: schema.TypeString, + Computed: true, + Description: "The access key of the sub user", + }, + "secret_key": { + Type: schema.TypeString, + Computed: true, + Description: "The secret key of the sub user", }, }, } @@ -37,34 +51,58 @@ func resourceSubUserAccessKeyCreate(ctx context.Context, d *schema.ResourceData, objectStorageService := NewObjectStorageService(client) vpcId := d.Get("vpc_id").(string) - s3ServiceId := d.Get("s3_service_id").(string) - subUserId := d.Get("sub_user_id").(string) + subUserId := d.Get("user_id").(string) + s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } + resp := objectStorageService.CreateSubUserAccessKey(vpcId, s3ServiceDetail.S3ServiceId, subUserId) - accessKey := objectStorageService.CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId) - if accessKey == nil { - return diag.Errorf("failed to create sub-user access key") + if !resp.Status { + return diag.FromErr(fmt.Errorf("error creating sub-user access key: %s", resp.Message)) } - d.SetId(accessKey.Credential.AccessKey) - d.Set("access_key", accessKey.Credential.AccessKey) - d.Set("secret_key", accessKey.Credential.SecretKey) + d.SetId(resp.Credential.AccessKey) + d.Set("access_key", resp.Credential.AccessKey) + d.Set("secret_key", resp.Credential.SecretKey) - return dataSourceSubUserRead(ctx, d, m) + return nil } +func resourceReadUserDetail(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + objectStorageService := NewObjectStorageService(client) -func 
resourceSubUserAccessKeyDelete(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + vpcId := d.Get("vpc_id").(string) + s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } + subUserId := d.Get("user_id").(string) + + subUser := objectStorageService.DetailSubUser(vpcId, s3ServiceDetail.S3ServiceId, subUserId) + if subUser.UserID == "" { + return diag.Errorf("sub-user with ID %s not found", subUserId) + } + d.Set("user_id", subUser.UserID) + return nil +} +func resourceSubUserAccessKeyDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { client := m.(*common.Client) objectStorageService := NewObjectStorageService(client) vpcId := d.Get("vpc_id").(string) - s3ServiceId := d.Get("s3_service_id").(string) - subUserId := d.Get("sub_user_id").(string) - accessKeyId := d.Id() + s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + } + subUserId := d.Get("user_id").(string) + accessKeyToDelete := d.Get("access_key").(string) - resp := objectStorageService.DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId, accessKeyId) + resp := objectStorageService.DeleteSubUserAccessKey(vpcId, s3ServiceDetail.S3ServiceId, subUserId, accessKeyToDelete) if !resp.Status { - return diag.Errorf("failed to delete sub-user access key") + return diag.Errorf("failed to delete sub-user access key: %s", resp.Message) } + d.SetId("") - return nil + return resourceReadUserDetail(ctx, d, m) } diff --git a/fptcloud/provider.go b/fptcloud/provider.go index 6703cc0..89c6273 100644 --- a/fptcloud/provider.go +++ b/fptcloud/provider.go @@ -81,9 +81,11 @@ func Provider() 
*schema.Provider { "fptcloud_object_storage_bucket_policy": fptcloud_object_storage.DataSourceBucketPolicy(), "fptcloud_object_storage_bucket_cors": fptcloud_object_storage.DataSourceBucketCors(), "fptcloud_object_storage_bucket_versioning": fptcloud_object_storage.DataSourceBucketVersioning(), - "fptcloud_object_storage_lifecycle": fptcloud_object_storage.DataSourceBucketLifecycle(), - "fptcloud_object_storage_static_website": fptcloud_object_storage.DataSourceBucketStaticWebsite(), + "fptcloud_object_storage_bucket_lifecycle": fptcloud_object_storage.DataSourceBucketLifecycle(), + "fptcloud_object_storage_bucket_static_website": fptcloud_object_storage.DataSourceBucketStaticWebsite(), "fptcloud_object_storage_sub_user_detail": fptcloud_object_storage.DataSourceSubUserDetail(), + "fptcloud_s3_service_enable": fptcloud_object_storage.DataSourceS3ServiceEnableResponse(), + "fptcloud_object_storage_bucket_acl": fptcloud_object_storage.DataSourceBucketAcl(), }, ResourcesMap: map[string]*schema.Resource{ "fptcloud_storage": fptcloud_storage.ResourceStorage(), @@ -104,6 +106,7 @@ func Provider() *schema.Provider { "fptcloud_object_storage_bucket_static_website": fptcloud_object_storage.ResourceBucketStaticWebsite(), "fptcloud_object_storage_bucket_acl": fptcloud_object_storage.ResourceBucketAcl(), "fptcloud_object_storage_sub_user_key": fptcloud_object_storage.ResourceSubUserKeys(), + "fptcloud_object_storage_bucket_lifecycle": fptcloud_object_storage.ResourceBucketLifeCycle(), }, ConfigureContextFunc: providerConfigureContext, } From f0d4ee9a72d4fa0c153f28d354c7563213d15a3d Mon Sep 17 00:00:00 2001 From: hoanglm Date: Mon, 18 Nov 2024 22:39:45 +0700 Subject: [PATCH 4/8] [Object Storage] update: docs for resource and data source tf [Object Storage] update: docs for resource and data source tf --- .../data-sources/object_storage_access_key.md | 34 ++++++++ docs/data-sources/object_storage_bucket.md | 43 +++++++++++ .../data-sources/object_storage_bucket_acl.md | 64 
+++++++++++++++ .../object_storage_bucket_cors.md | 44 +++++++++++ .../object_storage_bucket_lifecycle.md | 77 +++++++++++++++++++ .../object_storage_bucket_policy.md | 27 +++++++ .../object_storage_bucket_static_website.md | 31 ++++++++ .../object_storage_bucket_versioning.md | 30 ++++++++ docs/data-sources/object_storage_sub_user.md | 41 ++++++++++ .../object_storage_sub_user_detail.md | 30 ++++++++ docs/data-sources/s3_service_enable.md | 34 ++++++++ docs/resources/object_storage_access_key.md | 32 ++++++++ docs/resources/object_storage_bucket.md | 32 ++++++++ docs/resources/object_storage_bucket_acl.md | 32 ++++++++ docs/resources/object_storage_bucket_cors.md | 40 ++++++++++ .../object_storage_bucket_lifecycle.md | 40 ++++++++++ .../resources/object_storage_bucket_policy.md | 32 ++++++++ .../object_storage_bucket_static_website.md | 32 ++++++++ .../object_storage_bucket_versioning.md | 27 +++++++ docs/resources/object_storage_sub_user.md | 27 +++++++ docs/resources/object_storage_sub_user_key.md | 28 +++++++ 21 files changed, 777 insertions(+) create mode 100644 docs/data-sources/object_storage_access_key.md create mode 100644 docs/data-sources/object_storage_bucket.md create mode 100644 docs/data-sources/object_storage_bucket_acl.md create mode 100644 docs/data-sources/object_storage_bucket_cors.md create mode 100644 docs/data-sources/object_storage_bucket_lifecycle.md create mode 100644 docs/data-sources/object_storage_bucket_policy.md create mode 100644 docs/data-sources/object_storage_bucket_static_website.md create mode 100644 docs/data-sources/object_storage_bucket_versioning.md create mode 100644 docs/data-sources/object_storage_sub_user.md create mode 100644 docs/data-sources/object_storage_sub_user_detail.md create mode 100644 docs/data-sources/s3_service_enable.md create mode 100644 docs/resources/object_storage_access_key.md create mode 100644 docs/resources/object_storage_bucket.md create mode 100644 docs/resources/object_storage_bucket_acl.md 
create mode 100644 docs/resources/object_storage_bucket_cors.md create mode 100644 docs/resources/object_storage_bucket_lifecycle.md create mode 100644 docs/resources/object_storage_bucket_policy.md create mode 100644 docs/resources/object_storage_bucket_static_website.md create mode 100644 docs/resources/object_storage_bucket_versioning.md create mode 100644 docs/resources/object_storage_sub_user.md create mode 100644 docs/resources/object_storage_sub_user_key.md diff --git a/docs/data-sources/object_storage_access_key.md b/docs/data-sources/object_storage_access_key.md new file mode 100644 index 0000000..7a796d1 --- /dev/null +++ b/docs/data-sources/object_storage_access_key.md @@ -0,0 +1,34 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_access_key Data Source - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_access_key (Data Source) + + + + + + +## Schema + +### Required + +- `region_name` (String) The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) + +### Read-Only + +- `credentials` (List of Object) (see [below for nested schema](#nestedatt--credentials)) +- `id` (String) The ID of this resource. 
+ + +### Nested Schema for `credentials` + +Read-Only: + +- `access_key` (String) +- `active` (Boolean) diff --git a/docs/data-sources/object_storage_bucket.md b/docs/data-sources/object_storage_bucket.md new file mode 100644 index 0000000..9d5067e --- /dev/null +++ b/docs/data-sources/object_storage_bucket.md @@ -0,0 +1,43 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_bucket Data Source - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_bucket (Data Source) + + + + + + +## Schema + +### Required + +- `region_name` (String) The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) The VPC ID + +### Optional + +- `page` (Number) Page number +- `page_size` (Number) Number of items per page + +### Read-Only + +- `id` (String) The ID of this resource. +- `list_bucket_result` (List of Object) (see [below for nested schema](#nestedatt--list_bucket_result)) + + +### Nested Schema for `list_bucket_result` + +Read-Only: + +- `bucket_name` (String) +- `creation_date` (String) +- `endpoint` (String) +- `is_empty` (Boolean) +- `is_enabled_logging` (Boolean) +- `s3_service_id` (String) diff --git a/docs/data-sources/object_storage_bucket_acl.md b/docs/data-sources/object_storage_bucket_acl.md new file mode 100644 index 0000000..c223955 --- /dev/null +++ b/docs/data-sources/object_storage_bucket_acl.md @@ -0,0 +1,64 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_bucket_acl Data Source - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_bucket_acl (Data Source) + + + + + + +## Schema + +### Required + +- `bucket_name` (String) Name of the bucket to config the ACL +- `region_name` (String) The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) The VPC ID + +### Read-Only + +- `bucket_acl` (List of Object) (see [below for nested schema](#nestedatt--bucket_acl)) +- `canned_acl` (String) The Access Control List (ACL) status of the bucket which can be one of the following values: private, public-read, default is private +- `id` (String) The ID of this resource. +- `status` (Boolean) The status after configuring the bucket ACL + + +### Nested Schema for `bucket_acl` + +Read-Only: + +- `grants` (List of Object) (see [below for nested schema](#nestedobjatt--bucket_acl--grants)) +- `owner` (List of Object) (see [below for nested schema](#nestedobjatt--bucket_acl--owner)) + + +### Nested Schema for `bucket_acl.grants` + +Read-Only: + +- `grantee` (List of Object) (see [below for nested schema](#nestedobjatt--bucket_acl--grants--grantee)) +- `permission` (String) + + +### Nested Schema for `bucket_acl.grants.grantee` + +Read-Only: + +- `display_name` (String) +- `id` (String) +- `type` (String) + + + + +### Nested Schema for `bucket_acl.owner` + +Read-Only: + +- `display_name` (String) +- `id` (String) diff --git a/docs/data-sources/object_storage_bucket_cors.md b/docs/data-sources/object_storage_bucket_cors.md new file mode 100644 index 0000000..ed9c22d --- /dev/null +++ b/docs/data-sources/object_storage_bucket_cors.md @@ -0,0 +1,44 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_bucket_cors Data Source - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_bucket_cors (Data Source) + + + + + + +## Schema + +### Required + +- `bucket_name` (String) Name of the bucket +- `region_name` (String) The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) The VPC ID + +### Optional + +- `page` (Number) The page number +- `page_size` (Number) The number of items to return in each page + +### Read-Only + +- `cors_rule` (List of Object) The bucket cors rule (see [below for nested schema](#nestedatt--cors_rule)) +- `id` (String) The ID of this resource. + + +### Nested Schema for `cors_rule` + +Read-Only: + +- `allowed_headers` (List of String) +- `allowed_methods` (List of String) +- `allowed_origins` (List of String) +- `expose_headers` (List of String) +- `id` (String) +- `max_age_seconds` (Number) diff --git a/docs/data-sources/object_storage_bucket_lifecycle.md b/docs/data-sources/object_storage_bucket_lifecycle.md new file mode 100644 index 0000000..75ea227 --- /dev/null +++ b/docs/data-sources/object_storage_bucket_lifecycle.md @@ -0,0 +1,77 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_bucket_lifecycle Data Source - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_bucket_lifecycle (Data Source) + + + + + + +## Schema + +### Required + +- `bucket_name` (String) Name of the bucket to fetch policy for +- `region_name` (String) The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) The VPC ID + +### Optional + +- `page` (Number) The page number +- `page_size` (Number) The number of items to return in each page + +### Read-Only + +- `id` (String) The ID of this resource. 
+- `life_cycle_rules` (List of Object) (see [below for nested schema](#nestedatt--life_cycle_rules)) + + +### Nested Schema for `life_cycle_rules` + +Read-Only: + +- `abort_incomplete_multipart_upload` (List of Object) (see [below for nested schema](#nestedobjatt--life_cycle_rules--abort_incomplete_multipart_upload)) +- `expiration` (List of Object) (see [below for nested schema](#nestedobjatt--life_cycle_rules--expiration)) +- `filter` (List of Object) (see [below for nested schema](#nestedobjatt--life_cycle_rules--filter)) +- `id` (String) +- `noncurrent_version_expiration` (List of Object) (see [below for nested schema](#nestedobjatt--life_cycle_rules--noncurrent_version_expiration)) +- `prefix` (String) +- `status` (String) + + +### Nested Schema for `life_cycle_rules.abort_incomplete_multipart_upload` + +Read-Only: + +- `days_after_initiation` (Number) + + + +### Nested Schema for `life_cycle_rules.expiration` + +Read-Only: + +- `days` (Number) +- `expired_object_delete_marker` (Boolean) + + + +### Nested Schema for `life_cycle_rules.filter` + +Read-Only: + +- `prefix` (String) + + + +### Nested Schema for `life_cycle_rules.noncurrent_version_expiration` + +Read-Only: + +- `noncurrent_days` (Number) diff --git a/docs/data-sources/object_storage_bucket_policy.md b/docs/data-sources/object_storage_bucket_policy.md new file mode 100644 index 0000000..8b8d7e3 --- /dev/null +++ b/docs/data-sources/object_storage_bucket_policy.md @@ -0,0 +1,27 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_bucket_policy Data Source - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_bucket_policy (Data Source) + + + + + + +## Schema + +### Required + +- `bucket_name` (String) Name of the bucket to fetch policy for +- `region_name` (String) The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) The VPC ID + +### Read-Only + +- `id` (String) The ID of this resource. +- `policy` (String) The bucket policy in JSON format diff --git a/docs/data-sources/object_storage_bucket_static_website.md b/docs/data-sources/object_storage_bucket_static_website.md new file mode 100644 index 0000000..f6c58a8 --- /dev/null +++ b/docs/data-sources/object_storage_bucket_static_website.md @@ -0,0 +1,31 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_bucket_static_website Data Source - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_bucket_static_website (Data Source) + + + + + + +## Schema + +### Required + +- `bucket_name` (String) Name of the bucket to fetch policy for +- `region_name` (String) The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) The VPC ID + +### Optional + +- `error_document_key` (String) +- `index_document_suffix` (String) + +### Read-Only + +- `id` (String) The ID of this resource. diff --git a/docs/data-sources/object_storage_bucket_versioning.md b/docs/data-sources/object_storage_bucket_versioning.md new file mode 100644 index 0000000..8c2e521 --- /dev/null +++ b/docs/data-sources/object_storage_bucket_versioning.md @@ -0,0 +1,30 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_bucket_versioning Data Source - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_bucket_versioning (Data Source) + + + + + + +## Schema + +### Required + +- `bucket_name` (String) Name of the bucket +- `region_name` (String) The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) The VPC ID + +### Optional + +- `versioning_status` (String) Status of the versioning, must be Enabled or Suspended + +### Read-Only + +- `id` (String) The ID of this resource. diff --git a/docs/data-sources/object_storage_sub_user.md b/docs/data-sources/object_storage_sub_user.md new file mode 100644 index 0000000..6c90c02 --- /dev/null +++ b/docs/data-sources/object_storage_sub_user.md @@ -0,0 +1,41 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_sub_user Data Source - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_sub_user (Data Source) + + + + + + +## Schema + +### Required + +- `region_name` (String) The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) The VPC ID + +### Optional + +- `page` (Number) Page number +- `page_size` (Number) Number of items per page + +### Read-Only + +- `id` (String) The ID of this resource. 
+- `list_sub_user` (List of Object) List of sub-users (see [below for nested schema](#nestedatt--list_sub_user)) + + +### Nested Schema for `list_sub_user` + +Read-Only: + +- `active` (Boolean) +- `arn` (String) +- `role` (String) +- `user_id` (String) diff --git a/docs/data-sources/object_storage_sub_user_detail.md b/docs/data-sources/object_storage_sub_user_detail.md new file mode 100644 index 0000000..4905e8c --- /dev/null +++ b/docs/data-sources/object_storage_sub_user_detail.md @@ -0,0 +1,30 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_sub_user_detail Data Source - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_sub_user_detail (Data Source) + + + + + + +## Schema + +### Required + +- `vpc_id` (String) The VPC ID + +### Read-Only + +- `access_keys` (List of String) The sub-user's access keys +- `active` (Boolean) Whether the sub-user is active +- `arn` (String) The sub-user ARN +- `created_at` (String) The sub-user's creation date +- `id` (String) The ID of this resource. +- `role` (String) The sub-user's role +- `user_id` (String) The sub-user ID diff --git a/docs/data-sources/s3_service_enable.md b/docs/data-sources/s3_service_enable.md new file mode 100644 index 0000000..6c6c132 --- /dev/null +++ b/docs/data-sources/s3_service_enable.md @@ -0,0 +1,34 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_s3_service_enable Data Source - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_s3_service_enable (Data Source) + + + + + + +## Schema + +### Required + +- `vpc_id` (String) The ID of the VPC + +### Read-Only + +- `id` (String) The ID of this resource. 
+- `s3_enable_services` (List of Object) (see [below for nested schema](#nestedatt--s3_enable_services)) + + +### Nested Schema for `s3_enable_services` + +Read-Only: + +- `s3_platform` (String) +- `s3_service_id` (String) +- `s3_service_name` (String) diff --git a/docs/resources/object_storage_access_key.md b/docs/resources/object_storage_access_key.md new file mode 100644 index 0000000..432a658 --- /dev/null +++ b/docs/resources/object_storage_access_key.md @@ -0,0 +1,32 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_access_key Resource - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_access_key (Resource) + + + + + + +## Schema + +### Required + +- `region_name` (String) The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) The VPC ID + +### Optional + +- `access_key_id` (String) The access key ID +- `message` (String) The message after creating the access key +- `status` (Boolean) The status after creating the access key + +### Read-Only + +- `id` (String) The ID of this resource. +- `secret_access_key` (String) The secret access key diff --git a/docs/resources/object_storage_bucket.md b/docs/resources/object_storage_bucket.md new file mode 100644 index 0000000..a103f5f --- /dev/null +++ b/docs/resources/object_storage_bucket.md @@ -0,0 +1,32 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_bucket Resource - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_bucket (Resource) + + + + + + +## Schema + +### Required + +- `name` (String) The name of the bucket. Bucket names must be unique within an account. +- `region_name` (String) The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) + +### Optional + +- `acl` (String) +- `versioning` (String) The versioning state of the bucket. Accepted values are Enabled or Suspended, default was not set. + +### Read-Only + +- `id` (String) The ID of this resource. +- `status` (Boolean) The status after create or delete the bucket diff --git a/docs/resources/object_storage_bucket_acl.md b/docs/resources/object_storage_bucket_acl.md new file mode 100644 index 0000000..09fa8a8 --- /dev/null +++ b/docs/resources/object_storage_bucket_acl.md @@ -0,0 +1,32 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_bucket_acl Resource - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_bucket_acl (Resource) + + + + + + +## Schema + +### Required + +- `bucket_name` (String) Name of the bucket to config the ACL +- `canned_acl` (String) The Access Control List (ACL) status of the bucket which can be one of the following values: private, public-read, default is private +- `region_name` (String) The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) The VPC ID + +### Optional + +- `apply_objects` (Boolean) Apply the ACL to all objects in the bucket + +### Read-Only + +- `id` (String) The ID of this resource. 
+- `status` (Boolean) The status after configuring the bucket ACL diff --git a/docs/resources/object_storage_bucket_cors.md b/docs/resources/object_storage_bucket_cors.md new file mode 100644 index 0000000..1a548e7 --- /dev/null +++ b/docs/resources/object_storage_bucket_cors.md @@ -0,0 +1,40 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_bucket_cors Resource - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_bucket_cors (Resource) + + + + + + +## Schema + +### Required + +- `bucket_name` (String) Name of the bucket +- `region_name` (String) The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) The VPC ID + +### Optional + +- `cors_config` (String) The bucket lifecycle rule in JSON format, support only one rule +- `cors_config_file` (String) Path to the JSON file containing the bucket lifecycle rule, support only one rule + +### Read-Only + +- `bucket_cors_rules` (List of Object) (see [below for nested schema](#nestedatt--bucket_cors_rules)) +- `id` (String) The ID of this resource. 
+- `status` (Boolean) Status after bucket cors rule is created + + +### Nested Schema for `bucket_cors_rules` + +Read-Only: + +- `id` (String) diff --git a/docs/resources/object_storage_bucket_lifecycle.md b/docs/resources/object_storage_bucket_lifecycle.md new file mode 100644 index 0000000..0e23fad --- /dev/null +++ b/docs/resources/object_storage_bucket_lifecycle.md @@ -0,0 +1,40 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_bucket_lifecycle Resource - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_bucket_lifecycle (Resource) + + + + + + +## Schema + +### Required + +- `bucket_name` (String) Name of the bucket +- `region_name` (String) The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) The VPC ID + +### Optional + +- `life_cycle_rule` (String) The bucket lifecycle rule in JSON format, support only one rule +- `life_cycle_rule_file` (String) Path to the JSON file containing the bucket lifecycle rule, support only one rule + +### Read-Only + +- `id` (String) The ID of this resource. 
+- `rules` (List of Object) (see [below for nested schema](#nestedatt--rules)) +- `state` (Boolean) State after bucket lifecycle rule is created + + +### Nested Schema for `rules` + +Read-Only: + +- `id` (String) diff --git a/docs/resources/object_storage_bucket_policy.md b/docs/resources/object_storage_bucket_policy.md new file mode 100644 index 0000000..887794e --- /dev/null +++ b/docs/resources/object_storage_bucket_policy.md @@ -0,0 +1,32 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_bucket_policy Resource - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_bucket_policy (Resource) + + + + + + +## Schema + +### Required + +- `bucket_name` (String) Name of the bucket +- `region_name` (String) The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) The VPC ID + +### Optional + +- `policy` (String) The bucket policy in JSON format +- `policy_file` (String) Path to the JSON file containing the bucket policy + +### Read-Only + +- `id` (String) The ID of this resource. +- `status` (Boolean) Status after bucket policy is created diff --git a/docs/resources/object_storage_bucket_static_website.md b/docs/resources/object_storage_bucket_static_website.md new file mode 100644 index 0000000..249e255 --- /dev/null +++ b/docs/resources/object_storage_bucket_static_website.md @@ -0,0 +1,32 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_bucket_static_website Resource - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_bucket_static_website (Resource) + + + + + + +## Schema + +### Required + +- `bucket_name` (String) Name of the bucket +- `region_name` (String) The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `vpc_id` (String) The VPC ID + +### Optional + +- `error_document_key` (String) The object key name to use when a 4XX class error occurs +- `index_document_suffix` (String) Suffix that is appended to a request that is for a directory + +### Read-Only + +- `id` (String) The ID of this resource. +- `status` (Boolean) The status after configuring the bucket website diff --git a/docs/resources/object_storage_bucket_versioning.md b/docs/resources/object_storage_bucket_versioning.md new file mode 100644 index 0000000..de6ad7e --- /dev/null +++ b/docs/resources/object_storage_bucket_versioning.md @@ -0,0 +1,27 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_bucket_versioning Resource - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_bucket_versioning (Resource) + + + + + + +## Schema + +### Required + +- `bucket_name` (String) Name of the bucket +- `region_name` (String) The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `versioning_status` (String) Status of the versioning, must be Enabled or Suspended +- `vpc_id` (String) The VPC ID + +### Read-Only + +- `id` (String) The ID of this resource. diff --git a/docs/resources/object_storage_sub_user.md b/docs/resources/object_storage_sub_user.md new file mode 100644 index 0000000..7195b41 --- /dev/null +++ b/docs/resources/object_storage_sub_user.md @@ -0,0 +1,27 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_sub_user Resource - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_sub_user (Resource) + + + + + + +## Schema + +### Required + +- `region_name` (String) The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `role` (String) +- `user_id` (String) +- `vpc_id` (String) + +### Read-Only + +- `id` (String) The ID of this resource. diff --git a/docs/resources/object_storage_sub_user_key.md b/docs/resources/object_storage_sub_user_key.md new file mode 100644 index 0000000..4829bc5 --- /dev/null +++ b/docs/resources/object_storage_sub_user_key.md @@ -0,0 +1,28 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "fptcloud_object_storage_sub_user_key Resource - terraform-provider-fptcloud" +subcategory: "" +description: |- + +--- + +# fptcloud_object_storage_sub_user_key (Resource) + + + + + + +## Schema + +### Required + +- `region_name` (String) The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02 +- `user_id` (String) The sub user id, can retrieve from data source `fptcloud_object_storage_sub_user` +- `vpc_id` (String) The VPC id that the S3 service belongs to + +### Read-Only + +- `access_key` (String) The access key of the sub user +- `id` (String) The ID of this resource. 
+- `secret_key` (String) The secret key of the sub user From 11392e27af557ea46ff4c8f9a149d24e76d51dfb Mon Sep 17 00:00:00 2001 From: hoanglm Date: Mon, 18 Nov 2024 23:20:04 +0700 Subject: [PATCH 5/8] [Object Storage] Fix: Address linting issues for improved code quality --- ...asource_object_storage_bucket_lifecycle.go | 5 +- ...tasource_object_storage_sub_user_detail.go | 25 +++++++--- .../object-storage/resource_access_key.go | 35 +++++++++++--- fptcloud/object-storage/resource_bucket.go | 6 ++- .../object-storage/resource_bucket_acl.go | 16 +++++-- .../object-storage/resource_bucket_cors.go | 13 ++++-- .../resource_bucket_lifecycle.go | 17 +++++-- .../object-storage/resource_bucket_policy.go | 6 ++- .../resource_bucket_static_website.go | 14 ++++-- .../resource_bucket_versioning.go | 5 +- fptcloud/object-storage/resource_sub_user.go | 20 ++------ .../object-storage/resource_sub_user_keys.go | 14 ++++-- fptcloud/provider.go | 46 +++++++++---------- main.go | 1 - 14 files changed, 145 insertions(+), 78 deletions(-) diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go b/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go index 5c1ffe3..4fcee2f 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go @@ -143,7 +143,10 @@ func dataSourceBucketLifecycleRead(ctx context.Context, d *schema.ResourceData, d.SetId(bucketName) var formattedData []interface{} if lifeCycleResponse.Total == 0 { - d.Set("life_cycle_rules", make([]interface{}, 0)) + if err := d.Set("life_cycle_rules", make([]interface{}, 0)); err != nil { + d.SetId("") + return diag.FromErr(err) + } } for _, lifecycleRule := range lifeCycleResponse.Rules { data := map[string]interface{}{ diff --git a/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go b/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go index 9660399..8befde7 
100644 --- a/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go +++ b/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go @@ -72,16 +72,29 @@ func dataSourceSubUserDetailRead(ctx context.Context, d *schema.ResourceData, m } d.SetId(subUser.UserID) - d.Set("user_id", subUser.UserID) + if err := d.Set("user_id", subUser.UserID); err != nil { + return diag.FromErr(err) + } if subUser.Arn != nil { - d.Set("arn", subUser.Arn) + if err := d.Set("arn", subUser.Arn); err != nil { + return diag.FromErr(err) + } + } + + if err := d.Set("role", subUser.Role); err != nil { + return diag.FromErr(err) + } + if err := d.Set("active", subUser.Active); err != nil { + return diag.FromErr(err) } - d.Set("active", subUser.Active) - d.Set("role", subUser.Role) if subUser.CreatedAt != nil { - d.Set("created_at", subUser.CreatedAt) + if err := d.Set("created_at", subUser.CreatedAt); err != nil { + return diag.FromErr(err) + } + } + if err := d.Set("access_keys", subUser.AccessKeys); err != nil { + return diag.FromErr(err) } - d.Set("access_keys", subUser.AccessKeys) return nil } diff --git a/fptcloud/object-storage/resource_access_key.go b/fptcloud/object-storage/resource_access_key.go index 85b8189..8914685 100644 --- a/fptcloud/object-storage/resource_access_key.go +++ b/fptcloud/object-storage/resource_access_key.go @@ -77,13 +77,25 @@ func resourceAccessKeyCreate(ctx context.Context, d *schema.ResourceData, m inte if resp.Credential.AccessKey != "" { d.SetId(resp.Credential.AccessKey) - d.Set("access_key_id", resp.Credential.AccessKey) - d.Set("secret_access_key", resp.Credential.SecretKey) + if err := d.Set("access_key_id", resp.Credential.AccessKey); err != nil { + d.SetId("") + return diag.FromErr(err) + } + if err := d.Set("secret_access_key", resp.Credential.SecretKey); err != nil { + d.SetId("") + return diag.FromErr(err) + } } - d.Set("status", resp.Status) + if err := d.Set("status", resp.Status); err != nil { + d.SetId("") + return 
diag.FromErr(err) + } if resp.Message != "" { - d.Set("message", resp.Message) + if err := d.Set("message", resp.Message); err != nil { + d.SetId("") + return diag.FromErr(err) + } } return nil @@ -104,8 +116,14 @@ func resourceAccessKeyRead(ctx context.Context, d *schema.ResourceData, m interf for _, accessKey := range resp.Credentials { for _, key := range accessKey.Credentials { if key.AccessKey == accessKeyId { - d.Set("access_key_id", key.AccessKey) - d.Set("secret_access_key", secretAccessKey) + if err := d.Set("access_key_id", key.AccessKey); err != nil { + d.SetId("") + return diag.FromErr(err) + } + if err := d.Set("secret_access_key", secretAccessKey); err != nil { + d.SetId("") + return diag.FromErr(err) + } break } } @@ -142,7 +160,10 @@ func resourceAccessKeyDelete(ctx context.Context, d *schema.ResourceData, m inte log.Printf("[ERROR] Failed to delete access key %s: %v", accessKeyId, err) return diag.FromErr(err) } - d.Set("status", true) + if err := d.Set("status", true); err != nil { + d.SetId("") + return diag.FromErr(err) + } d.SetId("") return nil } diff --git a/fptcloud/object-storage/resource_bucket.go b/fptcloud/object-storage/resource_bucket.go index 20d9695..366abd8 100644 --- a/fptcloud/object-storage/resource_bucket.go +++ b/fptcloud/object-storage/resource_bucket.go @@ -99,7 +99,7 @@ func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, m interfa bucket := objectStorageService.CreateBucket(req, vpcId, s3ServiceDetail.S3ServiceId) if !bucket.Status { - return diag.Errorf(bucket.Message) + return diag.Errorf("%s", bucket.Message) } return resourceBucketRead(ctx, d, m) } @@ -118,7 +118,9 @@ func resourceBucketRead(_ context.Context, d *schema.ResourceData, m interface{} } for _, b := range bucket.Buckets { if b.Name == d.Get("name").(string) { - d.Set("name", b.Name) + if err := d.Set("name", b.Name); err != nil { + return diag.FromErr(err) + } return nil } } diff --git a/fptcloud/object-storage/resource_bucket_acl.go 
b/fptcloud/object-storage/resource_bucket_acl.go index 6636813..3d1c0e9 100644 --- a/fptcloud/object-storage/resource_bucket_acl.go +++ b/fptcloud/object-storage/resource_bucket_acl.go @@ -78,10 +78,14 @@ func resourceBucketAclCreate(ctx context.Context, d *schema.ResourceData, m inte r := service.PutBucketAcl(vpcId, s3ServiceDetail.S3ServiceId, bucketName, bucketAclRequest) if !r.Status { - d.Set("status", false) + if err := d.Set("status", false); err != nil { + return diag.Errorf("failed to create bucket ACL for bucket %s", bucketName) + } return diag.Errorf("failed to create bucket ACL for bucket %s", bucketName) } - d.Set("status", true) + if err := d.Set("status", true); err != nil { + return diag.FromErr(err) + } return resourceBucketAclRead(ctx, d, m) } @@ -99,8 +103,12 @@ func resourceBucketAclRead(ctx context.Context, d *schema.ResourceData, m interf if !r.Status { return diag.Errorf("failed to get bucket ACL for bucket %s", bucketName) } - d.Set("canned_acl", r.CannedACL) - d.Set("status", r.Status) + if err := d.Set("canned_acl", r.CannedACL); err != nil { + return diag.FromErr(err) + } + if err := d.Set("status", r.Status); err != nil { + return diag.FromErr(err) + } return nil } diff --git a/fptcloud/object-storage/resource_bucket_cors.go b/fptcloud/object-storage/resource_bucket_cors.go index af36d19..dca724d 100644 --- a/fptcloud/object-storage/resource_bucket_cors.go +++ b/fptcloud/object-storage/resource_bucket_cors.go @@ -112,7 +112,9 @@ func resourceBucketCorsCreate(ctx context.Context, d *schema.ResourceData, m int } r := service.CreateBucketCors(vpcId, s3ServiceDetail.S3ServiceId, bucketName, payload) if !r.Status { - d.Set("status", false) + if err := d.Set("status", false); err != nil { + return diag.FromErr(err) + } return diag.FromErr(fmt.Errorf("%s", r.Message)) } d.SetId(bucketName) @@ -141,7 +143,10 @@ func resourceBucketCorsRead(_ context.Context, d *schema.ResourceData, m interfa d.SetId(bucketName) var formattedData []interface{} if 
bucketCorsDetails.Total == 0 { - d.Set("bucket_cors_rules", make([]interface{}, 0)) + if err := d.Set("bucket_cors_rules", make([]interface{}, 0)); err != nil { + d.SetId("") + return diag.FromErr(err) + } } for _, corsRuleDetail := range bucketCorsDetails.CorsRules { data := map[string]interface{}{ @@ -198,7 +203,9 @@ func resourceBucketCorsDelete(ctx context.Context, d *schema.ResourceData, m int } r := service.UpdateBucketCors(vpcId, s3ServiceDetail.S3ServiceId, bucketName, payload) if !r.Status { - d.Set("status", false) + if err := d.Set("status", false); err != nil { + return diag.FromErr(err) + } return diag.FromErr(fmt.Errorf("%s", r.Message)) } d.SetId(bucketName) diff --git a/fptcloud/object-storage/resource_bucket_lifecycle.go b/fptcloud/object-storage/resource_bucket_lifecycle.go index 21db918..48b6600 100644 --- a/fptcloud/object-storage/resource_bucket_lifecycle.go +++ b/fptcloud/object-storage/resource_bucket_lifecycle.go @@ -114,12 +114,16 @@ func resourceBucketLifeCycleCreate(ctx context.Context, d *schema.ResourceData, payload["Expiration"] = map[string]interface{}{"ExpiredObjectDeleteMarker": jsonMap.Expiration.ExpiredObjectDeleteMarker} } r := service.PutBucketLifecycle(vpcId, s3ServiceDetail.S3ServiceId, bucketName, payload) + d.SetId(bucketName) if !r.Status { - d.Set("state", false) + if err := d.Set("state", false); err != nil { + d.SetId("") + return diag.FromErr(err) + } return diag.FromErr(fmt.Errorf("%s", r.Message)) } - d.SetId(bucketName) if err := d.Set("state", true); err != nil { + d.SetId("") return diag.FromErr(err) } @@ -145,7 +149,10 @@ func resourceBucketLifeCycleRead(_ context.Context, d *schema.ResourceData, m in d.SetId(bucketName) var formattedData []interface{} if lifeCycleResponse.Total == 0 { - d.Set("life_cycle_rules", make([]interface{}, 0)) + if err := d.Set("life_cycle_rules", make([]interface{}, 0)); err != nil { + d.SetId("") + return diag.FromErr(err) + } } for _, lifecycleRule := range lifeCycleResponse.Rules { 
data := map[string]interface{}{ @@ -204,7 +211,9 @@ func resourceBucketLifeCycleDelete(ctx context.Context, d *schema.ResourceData, } r := service.DeleteBucketLifecycle(vpcId, s3ServiceDetail.S3ServiceId, bucketName, payload) if !r.Status { - d.Set("state", false) + if err := d.Set("state", false); err != nil { + return diag.FromErr(err) + } return diag.FromErr(fmt.Errorf("%s", r.Message)) } d.SetId(bucketName) diff --git a/fptcloud/object-storage/resource_bucket_policy.go b/fptcloud/object-storage/resource_bucket_policy.go index b1d257d..79cd064 100644 --- a/fptcloud/object-storage/resource_bucket_policy.go +++ b/fptcloud/object-storage/resource_bucket_policy.go @@ -96,8 +96,10 @@ func resourceBucketPolicyCreate(ctx context.Context, d *schema.ResourceData, m i resp := service.PutBucketPolicy(vpcId, s3ServiceDetail.S3ServiceId, bucketName, payload) if !resp.Status { - d.Set("status", false) - return diag.Errorf(fmt.Sprintf("Error create bucket policy: %s", resp.Message)) + if err := d.Set("status", false); err != nil { + return diag.Errorf("failed to create bucket policy: %s", resp.Message) + } + return diag.FromErr(fmt.Errorf("error create bucket policy: %s", resp.Message)) } d.SetId(bucketName) if err := d.Set("status", true); err != nil { diff --git a/fptcloud/object-storage/resource_bucket_static_website.go b/fptcloud/object-storage/resource_bucket_static_website.go index 6636c9f..cbc0c31 100644 --- a/fptcloud/object-storage/resource_bucket_static_website.go +++ b/fptcloud/object-storage/resource_bucket_static_website.go @@ -80,14 +80,18 @@ func resourceBucketStaticWebsiteCreate(ctx context.Context, d *schema.ResourceDa Suffix: indexDocument, Key: errorDocument, }) - + d.SetId(bucketName) if !putBucketWebsite.Status { - diag.Errorf("failed to create bucket website for bucket %s", bucketName) - d.Set("status", false) + if err := d.Set("status", false); err != nil { + d.SetId("") + return diag.Errorf("failed to create bucket website for bucket %s", bucketName) + 
} return nil } - d.Set("status", true) - d.SetId(bucketName) + if err := d.Set("status", true); err != nil { + d.SetId("") + return diag.FromErr(err) + } return nil } diff --git a/fptcloud/object-storage/resource_bucket_versioning.go b/fptcloud/object-storage/resource_bucket_versioning.go index dd671e4..e744ed4 100644 --- a/fptcloud/object-storage/resource_bucket_versioning.go +++ b/fptcloud/object-storage/resource_bucket_versioning.go @@ -67,7 +67,10 @@ func resourceBucketVersioningCreate(ctx context.Context, d *schema.ResourceData, return diag.FromErr(err) } d.SetId(fmt.Sprintf("%s:%s", bucketName, versioningStatus)) - d.Set("versioning_status", versioningStatus) + if err := d.Set("versioning_status", versioningStatus); err != nil { + d.SetId("") + return diag.FromErr(err) + } return nil } diff --git a/fptcloud/object-storage/resource_sub_user.go b/fptcloud/object-storage/resource_sub_user.go index d2231d1..254e21e 100644 --- a/fptcloud/object-storage/resource_sub_user.go +++ b/fptcloud/object-storage/resource_sub_user.go @@ -62,26 +62,14 @@ func resourceSubUserCreate(ctx context.Context, d *schema.ResourceData, m interf // Set the resource ID after successful creation d.SetId(subUserId) - d.Set("user_id", subUserId) - - return nil -} -func readDetailSubUserOnly(ctx context.Context, d *schema.ResourceData, m interface{}, subUserId string) diag.Diagnostics { - client := m.(*common.Client) - objectStorageService := NewObjectStorageService(client) - vpcId := d.Get("vpc_id").(string) - s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) - if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) - } - subUser := objectStorageService.DetailSubUser(subUserId, vpcId, s3ServiceDetail.S3ServiceId) - if subUser == nil { + if err := d.Set("user_id", subUserId); err != nil { d.SetId("") - return nil + return diag.FromErr(err) } - d.SetId(subUserId) + 
return nil } + func resourceSubUserDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { client := m.(*common.Client) objectStorageService := NewObjectStorageService(client) diff --git a/fptcloud/object-storage/resource_sub_user_keys.go b/fptcloud/object-storage/resource_sub_user_keys.go index 36ae2b1..411b707 100644 --- a/fptcloud/object-storage/resource_sub_user_keys.go +++ b/fptcloud/object-storage/resource_sub_user_keys.go @@ -63,8 +63,14 @@ func resourceSubUserAccessKeyCreate(ctx context.Context, d *schema.ResourceData, } d.SetId(resp.Credential.AccessKey) - d.Set("access_key", resp.Credential.AccessKey) - d.Set("secret_key", resp.Credential.SecretKey) + if err := d.Set("access_key", resp.Credential.AccessKey); err != nil { + d.SetId("") + return diag.FromErr(err) + } + if err := d.Set("secret_key", resp.Credential.SecretKey); err != nil { + d.SetId("") + return diag.FromErr(err) + } return nil } @@ -83,7 +89,9 @@ func resourceReadUserDetail(ctx context.Context, d *schema.ResourceData, m inter if subUser.UserID == "" { return diag.Errorf("sub-user with ID %s not found", subUserId) } - d.Set("user_id", subUser.UserID) + if err := d.Set("user_id", subUser.UserID); err != nil { + return diag.FromErr(err) + } return nil } func resourceSubUserAccessKeyDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { diff --git a/fptcloud/provider.go b/fptcloud/provider.go index 89c6273..c769de0 100644 --- a/fptcloud/provider.go +++ b/fptcloud/provider.go @@ -63,29 +63,29 @@ func Provider() *schema.Provider { }, }, DataSourcesMap: map[string]*schema.Resource{ - "fptcloud_storage_policy": fptcloud_storage_policy.DataSourceStoragePolicy(), - "fptcloud_storage": fptcloud_storage.DataSourceStorage(), - "fptcloud_ssh_key": fptcloud_ssh.DataSourceSSHKey(), - "fptcloud_vpc": fptcloud_vpc.NewDataSource(), - "fptcloud_flavor": fptcloud_flavor.DataSourceFlavor(), - "fptcloud_image": fptcloud_image.DataSourceImage(), - 
"fptcloud_security_group": fptcloud_security_group.DataSourceSecurityGroup(), - "fptcloud_instance": fptcloud_instance.DataSourceInstance(), - "fptcloud_instance_group_policy": fptcloud_instance_group_policy.DataSourceInstanceGroupPolicy(), - "fptcloud_instance_group": fptcloud_instance_group.DataSourceInstanceGroup(), - "fptcloud_floating_ip": fptcloud_floating_ip.DataSourceFloatingIp(), - "fptcloud_subnet": fptcloud_subnet.DataSourceSubnet(), - "fptcloud_object_storage_access_key": fptcloud_object_storage.DataSourceAccessKey(), - "fptcloud_object_storage_sub_user": fptcloud_object_storage.DataSourceSubUser(), - "fptcloud_object_storage_bucket": fptcloud_object_storage.DataSourceBucket(), - "fptcloud_object_storage_bucket_policy": fptcloud_object_storage.DataSourceBucketPolicy(), - "fptcloud_object_storage_bucket_cors": fptcloud_object_storage.DataSourceBucketCors(), - "fptcloud_object_storage_bucket_versioning": fptcloud_object_storage.DataSourceBucketVersioning(), - "fptcloud_object_storage_bucket_lifecycle": fptcloud_object_storage.DataSourceBucketLifecycle(), - "fptcloud_object_storage_bucket_static_website": fptcloud_object_storage.DataSourceBucketStaticWebsite(), - "fptcloud_object_storage_sub_user_detail": fptcloud_object_storage.DataSourceSubUserDetail(), - "fptcloud_s3_service_enable": fptcloud_object_storage.DataSourceS3ServiceEnableResponse(), - "fptcloud_object_storage_bucket_acl": fptcloud_object_storage.DataSourceBucketAcl(), + "fptcloud_storage_policy": fptcloud_storage_policy.DataSourceStoragePolicy(), + "fptcloud_storage": fptcloud_storage.DataSourceStorage(), + "fptcloud_ssh_key": fptcloud_ssh.DataSourceSSHKey(), + "fptcloud_vpc": fptcloud_vpc.NewDataSource(), + "fptcloud_flavor": fptcloud_flavor.DataSourceFlavor(), + "fptcloud_image": fptcloud_image.DataSourceImage(), + "fptcloud_security_group": fptcloud_security_group.DataSourceSecurityGroup(), + "fptcloud_instance": fptcloud_instance.DataSourceInstance(), + "fptcloud_instance_group_policy": 
fptcloud_instance_group_policy.DataSourceInstanceGroupPolicy(), + "fptcloud_instance_group": fptcloud_instance_group.DataSourceInstanceGroup(), + "fptcloud_floating_ip": fptcloud_floating_ip.DataSourceFloatingIp(), + "fptcloud_subnet": fptcloud_subnet.DataSourceSubnet(), + "fptcloud_object_storage_access_key": fptcloud_object_storage.DataSourceAccessKey(), + "fptcloud_object_storage_sub_user": fptcloud_object_storage.DataSourceSubUser(), + "fptcloud_object_storage_bucket": fptcloud_object_storage.DataSourceBucket(), + "fptcloud_object_storage_bucket_policy": fptcloud_object_storage.DataSourceBucketPolicy(), + "fptcloud_object_storage_bucket_cors": fptcloud_object_storage.DataSourceBucketCors(), + "fptcloud_object_storage_bucket_versioning": fptcloud_object_storage.DataSourceBucketVersioning(), + "fptcloud_object_storage_bucket_lifecycle": fptcloud_object_storage.DataSourceBucketLifecycle(), + "fptcloud_object_storage_bucket_static_website": fptcloud_object_storage.DataSourceBucketStaticWebsite(), + "fptcloud_object_storage_sub_user_detail": fptcloud_object_storage.DataSourceSubUserDetail(), + "fptcloud_s3_service_enable": fptcloud_object_storage.DataSourceS3ServiceEnableResponse(), + "fptcloud_object_storage_bucket_acl": fptcloud_object_storage.DataSourceBucketAcl(), }, ResourcesMap: map[string]*schema.Resource{ "fptcloud_storage": fptcloud_storage.ResourceStorage(), diff --git a/main.go b/main.go index 6e615a5..1ec6d5f 100644 --- a/main.go +++ b/main.go @@ -31,7 +31,6 @@ func main() { providerserver.NewProtocol5(fptcloud.NewXplatProvider("dev")()), fptcloud.Provider().GRPCProvider, } - log.Printf("[DEBUG] providers: ", providers) muxServer, err := tf5muxserver.NewMuxServer(ctx, providers...) 
if err != nil { From 566285c1b3d61772c79dd5111e1c1d306ac74753 Mon Sep 17 00:00:00 2001 From: hoanglm Date: Wed, 27 Nov 2024 01:35:07 +0700 Subject: [PATCH 6/8] [WIP][Object Storage] Update: test --- .../object-storage/model_object_storage.go | 262 ++++++++++++ ...t_storage.go => object_storage_service.go} | 262 +----------- .../object_storage_service_test.go | 391 ++++++++++++++++++ .../object-storage/resource_access_key.go | 8 +- 4 files changed, 664 insertions(+), 259 deletions(-) create mode 100644 fptcloud/object-storage/model_object_storage.go rename fptcloud/object-storage/{datasource_object_storage.go => object_storage_service.go} (65%) create mode 100644 fptcloud/object-storage/object_storage_service_test.go diff --git a/fptcloud/object-storage/model_object_storage.go b/fptcloud/object-storage/model_object_storage.go new file mode 100644 index 0000000..9cda33c --- /dev/null +++ b/fptcloud/object-storage/model_object_storage.go @@ -0,0 +1,262 @@ +package fptcloud_object_storage + +type AbortIncompleteMultipartUpload struct { + DaysAfterInitiation int `json:"DaysAfterInitiation"` +} + +type AccessKey struct { + Credentials []struct { + ID string `json:"id"` + Credentials []struct { + AccessKey string `json:"accessKey"` + Active bool `json:"active"` + CreatedDate interface{} `json:"createdDate,omitempty"` + } `json:"credentials"` + } `json:"credentials"` +} + +type BucketAclRequest struct { + CannedAcl string `json:"cannedAcl"` + ApplyObjects bool `json:"applyObjects"` +} + +type BucketAclResponse struct { + Status bool `json:"status"` + Owner struct { + DisplayName string `json:"DisplayName"` + ID string `json:"ID"` + } `json:"Owner"` + Grants []struct { + Grantee struct { + DisplayName string `json:"DisplayName"` + ID string `json:"ID"` + Type string `json:"Type"` + } `json:"Grantee"` + Permission string `json:"Permission"` + } `json:"Grants"` + CannedACL string `json:"CannedACL"` +} + +type BucketCors struct { + CorsRules []CorsRule `json:"CORSRules"` +} + 
+type BucketCorsResponse struct { + Status bool `json:"status"` + CorsRules []struct { + ID string `json:"ID"` + AllowedHeaders []string `json:"AllowedHeaders,omitempty"` + AllowedMethods []string `json:"AllowedMethods"` + AllowedOrigins []string `json:"AllowedOrigins"` + ExposeHeaders []string `json:"ExposeHeaders,omitempty"` + MaxAgeSeconds int `json:"MaxAgeSeconds"` + } `json:"cors_rules"` + Total int `json:"total"` +} + +type BucketLifecycleResponse struct { + Status bool `json:"status"` + Rules []struct { + Expiration struct { + ExpiredObjectDeleteMarker bool `json:"ExpiredObjectDeleteMarker,omitempty"` + Days int `json:"Days,omitempty"` + } `json:"Expiration"` + ID string `json:"ID"` + Filter struct { + Prefix string `json:"Prefix"` + } `json:"Filter,omitempty"` + Status string `json:"Status"` + NoncurrentVersionExpiration struct { + NoncurrentDays int `json:"NoncurrentDays"` + } `json:"NoncurrentVersionExpiration"` + AbortIncompleteMultipartUpload struct { + DaysAfterInitiation int `json:"DaysAfterInitiation"` + } `json:"AbortIncompleteMultipartUpload"` + Prefix string `json:"Prefix,omitempty"` + } `json:"rules"` + Total int `json:"total"` +} + +type BucketPolicyRequest struct { + Policy string `json:"policy"` +} + +type BucketPolicyResponse struct { + Status bool `json:"status"` + Policy string `json:"policy"` +} + +type BucketRequest struct { + Name string `json:"name"` + Versioning string `json:"versioning,omitempty"` + Acl string `json:"acl"` +} + +type BucketVersioningRequest struct { + Status string `json:"status"` // "Enabled" or "Suspended" +} + +type BucketVersioningResponse struct { + Status bool `json:"status"` + Config string `json:"config"` // "Enabled" or "Suspended" +} + +type BucketWebsiteRequest struct { + Key string `json:"key"` + Suffix string `json:"suffix"` + Bucket string `json:"bucket"` +} + +type BucketWebsiteResponse struct { + Status bool `json:"status"` + Config struct { + ResponseMetadata struct { + RequestID string 
`json:"RequestId"` + HostID string `json:"HostId"` + HTTPStatusCode int `json:"HTTPStatusCode"` + HTTPHeaders struct { + XAmzRequestID string `json:"x-amz-request-id"` + ContentType string `json:"content-type"` + ContentLength string `json:"content-length"` + Date string `json:"date"` + } `json:"HTTPHeaders"` + RetryAttempts int `json:"RetryAttempts"` + } `json:"ResponseMetadata"` + IndexDocument struct { + Suffix string `json:"Suffix"` + } `json:"IndexDocument"` + ErrorDocument struct { + Key string `json:"Key"` + } `json:"ErrorDocument"` + } `json:"config,omitempty"` +} + +type CommonResponse struct { + Status bool `json:"status"` + Message string `json:"message,omitempty"` +} + +type CorsRule struct { + ID string `json:"ID,omitempty"` + AllowedOrigins []string `json:"AllowedOrigins"` + AllowedMethods []string `json:"AllowedMethods"` + ExposeHeaders []string `json:"ExposeHeaders,omitempty"` + AllowedHeaders []string `json:"AllowedHeaders,omitempty"` + MaxAgeSeconds int `json:"MaxAgeSeconds"` +} + +type CreateAccessKeyResponse struct { + Status bool `json:"status"` + Message string `json:"message,omitempty"` + Credential struct { + AccessKey string `json:"accessKey"` + SecretKey string `json:"secretKey"` + Active interface{} `json:"active"` + CreatedDate interface{} `json:"createdDate,omitempty"` + } `json:"credential,omitempty"` +} + +type DetailSubUser struct { + UserID string `json:"user_id"` + Arn interface{} `json:"arn,omitempty"` + Active bool `json:"active"` + Role string `json:"role"` + CreatedAt interface{} `json:"created_at,omitempty"` + AccessKeys []string `json:"access_keys"` +} + +type Expiration struct { + Days int `json:"Days,omitempty"` + ExpiredObjectDeleteMarker bool `json:"ExpiredObjectDeleteMarker,omitempty"` +} + +type Filter struct { + Prefix string `json:"Prefix"` +} + +type ListBucketResponse struct { + Buckets []struct { + Name string `json:"Name"` + CreationDate string `json:"CreationDate"` + IsEmpty bool `json:"isEmpty"` + S3ServiceID 
string `json:"s3_service_id"` + IsEnabledLogging bool `json:"isEnabledLogging"` + Endpoint string `json:"endpoint"` + } `json:"buckets"` + Total int `json:"total"` +} + +type NoncurrentVersionExpiration struct { + NoncurrentDays int `json:"NoncurrentDays"` +} + +type PutBucketAclResponse struct { + Status bool `json:"status"` + TaskID string `json:"taskId"` +} + +type S3BucketLifecycleConfig struct { + ID string `json:"ID"` + Filter Filter `json:"Filter"` + Expiration Expiration `json:"Expiration"` + NoncurrentVersionExpiration NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration"` + AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload"` +} + +type S3ServiceEnableResponse struct { + Data []struct { + S3ServiceName string `json:"s3_service_name"` + S3ServiceID string `json:"s3_service_id"` + S3Platform string `json:"s3_platform"` + DefaultUser interface{} `json:"default_user"` + MigrateQuota int `json:"migrate_quota"` + SyncQuota int `json:"sync_quota"` + RgwTotalNodes int `json:"rgw_total_nodes"` + RgwUserActiveNodes int `json:"rgw_user_active_nodes"` + HasUnusualConfig interface{} `json:"has_unusual_config"` + } `json:"data"` + Total int `json:"total"` +} + +type Statement struct { + Sid string `json:"Sid"` + Effect string `json:"Effect"` + Principal map[string]interface{} `json:"Principal"` + Action []string `json:"Action"` + Resource []string `json:"Resource"` +} + +type SubUser struct { + Role string `json:"role"` + UserId string `json:"user_id"` +} + +type SubUserCreateKeyResponse struct { + Status bool `json:"status"` + Message string `json:"message,omitempty"` + Credential struct { + AccessKey string `json:"accessKey,omitempty"` + SecretKey string `json:"secretKey,omitempty"` + Active interface{} `json:"active,omitempty"` + CreatedDate interface{} `json:"createdDate,omitempty"` + } `json:"credential,omitempty"` +} + +type SubUserCreateRequest struct { + Username string `json:"username"` + DisplayName 
string `json:"display_name"` + Email string `json:"email"` + Permissions []string `json:"permissions"` +} + +type SubUserListResponse struct { + SubUsers []struct { + UserID string `json:"user_id"` + Arn string `json:"arn"` + Active bool `json:"active"` + Role string `json:"role"` + CreatedAt interface{} `json:"created_at,omitempty"` + AccessKeys interface{} `json:"access_keys,omitempty"` + } `json:"sub_users"` + Total int `json:"total"` +} diff --git a/fptcloud/object-storage/datasource_object_storage.go b/fptcloud/object-storage/object_storage_service.go similarity index 65% rename from fptcloud/object-storage/datasource_object_storage.go rename to fptcloud/object-storage/object_storage_service.go index 187c249..7059a7f 100644 --- a/fptcloud/object-storage/datasource_object_storage.go +++ b/fptcloud/object-storage/object_storage_service.go @@ -6,254 +6,6 @@ import ( common "terraform-provider-fptcloud/commons" ) -// SubUserCreateRequest represents the request body for creating a sub-user -type SubUserCreateRequest struct { - Username string `json:"username"` - DisplayName string `json:"display_name"` - Email string `json:"email"` - Permissions []string `json:"permissions"` -} -type AccessKey struct { - Credentials []struct { - ID string `json:"id"` - Credentials []struct { - AccessKey string `json:"accessKey"` - Active bool `json:"active"` - CreatedDate interface{} `json:"createdDate"` - } `json:"credentials"` - } `json:"credentials"` -} -type CreateAccessKeyResponse struct { - Status bool `json:"status"` - Message string `json:"message,omitempty"` - Credential struct { - AccessKey string `json:"accessKey"` - SecretKey string `json:"secretKey"` - Active interface{} `json:"active"` - CreatedDate interface{} `json:"createdDate"` - } `json:"credential,omitempty"` -} -type SubUserCreateKeyResponse struct { - Status bool `json:"status"` - Message string `json:"message,omitempty"` - Credential struct { - AccessKey string `json:"accessKey,omitempty"` - SecretKey string 
`json:"secretKey,omitempty"` - Active interface{} `json:"active,omitempty"` - CreatedDate interface{} `json:"createdDate,omitempty"` - } `json:"credential,omitempty"` -} - -type SubUser struct { - Role string `json:"role"` - UserId string `json:"user_id"` -} -type SubUserListResponse struct { - SubUsers []struct { - UserID string `json:"user_id"` - Arn string `json:"arn"` - Active bool `json:"active"` - Role string `json:"role"` - CreatedAt interface{} `json:"created_at"` - AccessKeys interface{} `json:"access_keys"` - } `json:"sub_users"` - Total int `json:"total"` -} -type CommonResponse struct { - Status bool `json:"status"` - Message string `json:"message,omitempty"` -} -type CorsRule struct { - ID string `json:"ID,omitempty"` - AllowedOrigins []string `json:"AllowedOrigins"` - AllowedMethods []string `json:"AllowedMethods"` - ExposeHeaders []string `json:"ExposeHeaders,omitempty"` - AllowedHeaders []string `json:"AllowedHeaders,omitempty"` - MaxAgeSeconds int `json:"MaxAgeSeconds"` -} -type BucketCors struct { - CorsRules []CorsRule `json:"CORSRules"` -} -type BucketCorsResponse struct { - Status bool `json:"status"` - CorsRules []struct { - ID string `json:"ID"` - AllowedHeaders []string `json:"AllowedHeaders,omitempty"` - AllowedMethods []string `json:"AllowedMethods"` - AllowedOrigins []string `json:"AllowedOrigins"` - ExposeHeaders []string `json:"ExposeHeaders,omitempty"` - MaxAgeSeconds int `json:"MaxAgeSeconds"` - } `json:"cors_rules"` - Total int `json:"total"` -} - -type BucketPolicyResponse struct { - Status bool `json:"status"` - Policy string `json:"policy"` -} -type BucketPolicyRequest struct { - Policy string `json:"policy"` -} -type Statement struct { - Sid string `json:"Sid"` - Effect string `json:"Effect"` - Principal map[string]interface{} `json:"Principal"` - Action []string `json:"Action"` - Resource []string `json:"Resource"` -} - -type BucketVersioningRequest struct { - Status string `json:"status"` // "Enabled" or "Suspended" -} -type 
BucketVersioningResponse struct { - Status bool `json:"status"` - Config string `json:"config"` // "Enabled" or "Suspended" -} - -type BucketAclResponse struct { - Status bool `json:"status"` - Owner struct { - DisplayName string `json:"DisplayName"` - ID string `json:"ID"` - } `json:"Owner"` - Grants []struct { - Grantee struct { - DisplayName string `json:"DisplayName"` - ID string `json:"ID"` - Type string `json:"Type"` - } `json:"Grantee"` - Permission string `json:"Permission"` - } `json:"Grants"` - CannedACL string `json:"CannedACL"` -} -type BucketAclRequest struct { - CannedAcl string `json:"cannedAcl"` - ApplyObjects bool `json:"applyObjects"` -} -type PutBucketAclResponse struct { - Status bool `json:"status"` - // TaskID may be empty if applyObjects is false, if applyObjects is true, the taskID will be returned - TaskID string `json:"taskId"` -} -type BucketWebsiteRequest struct { - Key string `json:"key"` - Suffix string `json:"suffix"` - Bucket string `json:"bucket"` -} -type BucketWebsiteResponse struct { - Status bool `json:"status"` - Config struct { - ResponseMetadata struct { - RequestID string `json:"RequestId"` - HostID string `json:"HostId"` - HTTPStatusCode int `json:"HTTPStatusCode"` - HTTPHeaders struct { - XAmzRequestID string `json:"x-amz-request-id"` - ContentType string `json:"content-type"` - ContentLength string `json:"content-length"` - Date string `json:"date"` - } `json:"HTTPHeaders"` - RetryAttempts int `json:"RetryAttempts"` - } `json:"ResponseMetadata"` - IndexDocument struct { - Suffix string `json:"Suffix"` - } `json:"IndexDocument"` - ErrorDocument struct { - Key string `json:"Key"` - } `json:"ErrorDocument"` - } `json:"config,omitempty"` -} - -type S3ServiceEnableResponse struct { - Data []struct { - S3ServiceName string `json:"s3_service_name"` - S3ServiceID string `json:"s3_service_id"` - S3Platform string `json:"s3_platform"` - DefaultUser interface{} `json:"default_user"` - MigrateQuota int `json:"migrate_quota"` - 
SyncQuota int `json:"sync_quota"` - RgwTotalNodes int `json:"rgw_total_nodes"` - RgwUserActiveNodes int `json:"rgw_user_active_nodes"` - HasUnusualConfig interface{} `json:"has_unusual_config"` - } `json:"data"` - Total int `json:"total"` -} - -// Bucket represents the response structure for a created bucket -type BucketRequest struct { - Name string `json:"name"` - Region string `json:"region"` - Versioning string `json:"versioning"` - Acl string `json:"acl"` -} -type ListBucketResponse struct { - Buckets []struct { - Name string `json:"Name"` - CreationDate string `json:"CreationDate"` - IsEmpty bool `json:"isEmpty"` - S3ServiceID string `json:"s3_service_id"` - IsEnabledLogging bool `json:"isEnabledLogging"` - Endpoint string `json:"endpoint"` - } `json:"buckets"` - Total int `json:"total"` -} -type BucketLifecycleResponse struct { - Status bool `json:"status"` - Rules []struct { - Expiration struct { - ExpiredObjectDeleteMarker bool `json:"ExpiredObjectDeleteMarker,omitempty"` - Days int `json:"Days,omitempty"` - } `json:"Expiration"` - ID string `json:"ID"` - Filter struct { - Prefix string `json:"Prefix"` - } `json:"Filter,omitempty"` - Status string `json:"Status"` - NoncurrentVersionExpiration struct { - NoncurrentDays int `json:"NoncurrentDays"` - } `json:"NoncurrentVersionExpiration"` - AbortIncompleteMultipartUpload struct { - DaysAfterInitiation int `json:"DaysAfterInitiation"` - } `json:"AbortIncompleteMultipartUpload"` - Prefix string `json:"Prefix,omitempty"` - } `json:"rules"` - Total int `json:"total"` -} - -type DetailSubUser struct { - UserID string `json:"user_id"` - Arn interface{} `json:"arn"` - Active bool `json:"active"` - Role string `json:"role"` - CreatedAt interface{} `json:"created_at"` - AccessKeys []string `json:"access_keys"` -} - -type S3BucketLifecycleConfig struct { - ID string `json:"ID"` - Filter Filter `json:"Filter"` - Expiration Expiration `json:"Expiration"` - NoncurrentVersionExpiration NoncurrentVersionExpiration 
`json:"NoncurrentVersionExpiration"` - AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload"` -} - -type Filter struct { - Prefix string `json:"Prefix"` -} - -type Expiration struct { - Days int `json:"Days,omitempty"` - ExpiredObjectDeleteMarker bool `json:"ExpiredObjectDeleteMarker,omitempty"` -} - -type NoncurrentVersionExpiration struct { - NoncurrentDays int `json:"NoncurrentDays"` -} - -type AbortIncompleteMultipartUpload struct { - DaysAfterInitiation int `json:"DaysAfterInitiation"` -} - // ObjectStorageService defines the interface for object storage operations type ObjectStorageService interface { CheckServiceEnable(vpcId string) S3ServiceEnableResponse @@ -265,7 +17,7 @@ type ObjectStorageService interface { // Access key ListAccessKeys(vpcId, s3ServiceId string) (AccessKey, error) - DeleteAccessKey(vpcId, s3ServiceId, accessKeyId string) error + DeleteAccessKey(vpcId, s3ServiceId, accessKeyId string) CommonResponse CreateAccessKey(vpcId, s3ServiceId string) *CreateAccessKeyResponse // Sub user @@ -336,13 +88,13 @@ func (s *ObjectStorageServiceImpl) CreateBucket(req BucketRequest, vpcId, s3Serv return CommonResponse{Status: false, Message: err.Error()} } - var bucket BucketRequest + var bucket CommonResponse err = json.Unmarshal(resp, &bucket) if err != nil { return CommonResponse{Status: false, Message: err.Error()} } - return CommonResponse{Status: true, Message: "Bucket created successfully"} + return CommonResponse{Status: bucket.Status, Message: bucket.Message} } // CreateSubUser creates a new sub-user @@ -358,7 +110,6 @@ func (s *ObjectStorageServiceImpl) CreateSubUser(req SubUser, vpcId, s3ServiceId if err != nil { return &CommonResponse{Status: false, Message: err.Error()} } - return &CommonResponse{Status: subUser.Status, Message: "Sub-user created successfully"} } @@ -437,13 +188,14 @@ func (s *ObjectStorageServiceImpl) DeleteBucket(vpcId, s3ServiceId, bucketName s return CommonResponse{Status: 
true, Message: "Bucket deleted successfully"} } -func (s *ObjectStorageServiceImpl) DeleteAccessKey(vpcId, s3ServiceId, accessKeyId string) error { +func (s *ObjectStorageServiceImpl) DeleteAccessKey(vpcId, s3ServiceId, accessKeyId string) CommonResponse { apiPath := common.ApiPath.DeleteAccessKey(vpcId, s3ServiceId) body := map[string]string{"accessKey": accessKeyId} + if _, err := s.client.SendDeleteRequestWithBody(apiPath, body); err != nil { - return fmt.Errorf("failed to delete access key: %v", err) + return CommonResponse{Status: false, Message: err.Error()} } - return nil + return CommonResponse{Status: true, Message: "Access key deleted successfully"} } // Implement bucket policy methods diff --git a/fptcloud/object-storage/object_storage_service_test.go b/fptcloud/object-storage/object_storage_service_test.go new file mode 100644 index 0000000..fea7cdb --- /dev/null +++ b/fptcloud/object-storage/object_storage_service_test.go @@ -0,0 +1,391 @@ +package fptcloud_object_storage_test + +import ( + "fmt" + "testing" + + common "terraform-provider-fptcloud/commons" + fptcloud_object_storage "terraform-provider-fptcloud/fptcloud/object-storage" + + "github.com/stretchr/testify/assert" +) + +func TestCreateResourceAccessKey_ReturnsResourceAccessKeyIDWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + "message": "Create resource access key successfully", + "credential": { + "accessKey": "11111111-aaaa-1111-bbbb-111111111111", + "secretKey": "22222222-bbbb-2222-cccc-222222222222" + } + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/user/credentials": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + resourceAccessKeyID := service.CreateAccessKey(vpcId, s3ServiceId) + assert.NotNil(t, resourceAccessKeyID) + + assert.Equal(t, 
"11111111-aaaa-1111-bbbb-111111111111", resourceAccessKeyID.Credential.AccessKey) + assert.Equal(t, "22222222-bbbb-2222-cccc-222222222222", resourceAccessKeyID.Credential.SecretKey) + assert.Equal(t, true, resourceAccessKeyID.Status) + assert.Equal(t, "Create resource access key successfully", resourceAccessKeyID.Message) +} + +func TestCreateResourceAccessKey_ReturnsErrorWhenFailed(t *testing.T) { + mockResponse := `{ + "status": false, + "message": "Failed to create resource access key", + "credential": {} + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/user/credentials": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + resourceAccessKeyID := service.CreateAccessKey(vpcId, s3ServiceId) + assert.NotNil(t, resourceAccessKeyID) + + assert.Equal(t, "", resourceAccessKeyID.Credential.AccessKey) + assert.Equal(t, "", resourceAccessKeyID.Credential.SecretKey) + assert.Equal(t, false, resourceAccessKeyID.Status) + assert.Equal(t, "Failed to create resource access key", resourceAccessKeyID.Message) +} + +func TestDeleteResouurceAccessKey_ReturnOkWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + "message": "Delete resource access key successfully" + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/user/credentials/credential_id": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + credentialId := "credential_id" + res := service.DeleteAccessKey(vpcId, s3ServiceId, credentialId) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) + assert.Equal(t, "Access key deleted successfully", res.Message) +} + +func TestListAccessKeys_ReturnAccessKeysWhenSuccess(t *testing.T) { + 
mockResponse := `{ + "credentials": [ + { + "id": "credential_id", + "credentials": [ + { + "accessKey": "11111111-aaaa-1111-bbbb-111111111111", + "active": true + } + ] + } + ] + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/user/credentials?s3_service_id=s3_service_id": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + accessKeys, _ := service.ListAccessKeys(vpcId, s3ServiceId) + assert.NotNil(t, accessKeys) + assert.Equal(t, "credential_id", accessKeys.Credentials[0].ID) + assert.Equal(t, "11111111-aaaa-1111-bbbb-111111111111", accessKeys.Credentials[0].Credentials[0].AccessKey) + assert.Equal(t, true, accessKeys.Credentials[0].Credentials[0].Active) +} + +func TestCreateBucket_ReturnsBucketIDWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + "message": "Create bucket successfully" + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + bucketRequest := fptcloud_object_storage.BucketRequest{ + Name: "bucket_name", + Acl: "private", + } + r := service.CreateBucket(bucketRequest, vpcId, s3ServiceId) + assert.NotNil(t, r) + assert.Equal(t, true, r.Status) +} + +func TestCreateBucket_ReturnsErrorWhenFailed(t *testing.T) { + mockResponse := `{ + "status": false, + "message": "Failed to create bucket", + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + bucketRequest := 
fptcloud_object_storage.BucketRequest{ + Name: "bucket_name", + Acl: "private", + } + r := service.CreateBucket(bucketRequest, vpcId, s3ServiceId) + assert.NotNil(t, r) + assert.Equal(t, false, r.Status) +} + +func TestListBuckets_ReturnsBucketsWhenSuccess(t *testing.T) { + mockResponse := `{ + "buckets": [ + { + "Name": "bucket_name", + "CreationDate": "2024-11-26T16:43:55.121000+00:00", + "isEmpty": false, + "s3_service_id": "s3_service_id", + "isEnabledLogging": false, + "endpoint": "https://xxxx-xxx.xyz.com" + } + ], + "total": 1 + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/buckets?page=5&page_size=10&s3_service_id=s3_service_id": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + buckets := service.ListBuckets(vpcId, s3ServiceId, 5, 10) + assert.NotNil(t, buckets) + assert.Equal(t, "bucket_name", buckets.Buckets[0].Name) + assert.Equal(t, "2024-11-26T16:43:55.121000+00:00", buckets.Buckets[0].CreationDate) + assert.Equal(t, false, buckets.Buckets[0].IsEmpty) + assert.Equal(t, "s3_service_id", buckets.Buckets[0].S3ServiceID) + assert.Equal(t, false, buckets.Buckets[0].IsEnabledLogging) + assert.Equal(t, "https://xxxx-xxx.xyz.com", buckets.Buckets[0].Endpoint) +} + +func TestListBuckets_ReturnsErrorWhenFailed(t *testing.T) { + mockResponse := `{ + "buckets": [], + "total": 0 + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/buckets?page=5&page_size=10&s3_service_id=s3_service_id": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + buckets := service.ListBuckets(vpcId, s3ServiceId, 5, 10) + assert.NotNil(t, buckets) + assert.Equal(t, 0, buckets.Total) +} + +func TestDeleteBucket_ReturnsOkWhenSuccess(t *testing.T) { 
+ mockResponse := `{ + "status": true + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + bucketName := "bucket_name" + res := service.DeleteBucket(vpcId, s3ServiceId, bucketName) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) +} + +func TestCreateSubUser_ReturnsTrueWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + "message": "Sub-user created successfully" + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/create": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUserRequest := fptcloud_object_storage.SubUser{ + Role: "admin", + UserId: "user_id", + } + r := service.CreateSubUser(subUserRequest, vpcId, s3ServiceId) + fmt.Println("Response: ", r) + assert.NotNil(t, r) + assert.Equal(t, true, r.Status) + assert.Equal(t, "Sub-user created successfully", r.Message) +} + +func TestCreateSubUser_ReturnsFalseWhenFailed(t *testing.T) { + mockResponse := `{ + "status": false + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/wrong_endpoint": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUserRequest := fptcloud_object_storage.SubUser{ + Role: "admin", + UserId: "user_id", + } + r := service.CreateSubUser(subUserRequest, vpcId, s3ServiceId) + fmt.Println("Response: ", r) + assert.NotNil(t, r) + assert.Equal(t, false, r.Status) +} + +func TestDeleteSubUser_ReturnOkWhenSuccess(t *testing.T) { + mockResponse 
:= `{}` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/sub_user_id/delete": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUserId := "sub_user_id" + err := service.DeleteSubUser(vpcId, s3ServiceId, subUserId) + assert.Nil(t, err) +} + +func TestListSubUsers_ReturnsSubUsersWhenSuccess(t *testing.T) { + mockResponse := `{ + "sub_users": [ + { + "user_id": "sgn-replicate123123", + "arn": "arn:aws:iam:::user/xxx:sgn-replicate123123", + "active": true, + "role": "SubUserReadWrite" + } + ], + "total": 1 + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/list?page=5&page_size=25": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUsers, err := service.ListSubUsers(vpcId, s3ServiceId, 5, 25) + fmt.Println("SubUsers: ", subUsers) + fmt.Println("err: ", err) + assert.NotNil(t, subUsers) + assert.Nil(t, err) + assert.Equal(t, 1, subUsers.Total) + assert.Equal(t, "sgn-replicate123123", subUsers.SubUsers[0].UserID) + assert.Equal(t, "arn:aws:iam:::user/xxx:sgn-replicate123123", subUsers.SubUsers[0].Arn) + assert.Equal(t, true, subUsers.SubUsers[0].Active) + assert.Equal(t, "SubUserReadWrite", subUsers.SubUsers[0].Role) +} + +func TestListSubUsers_ReturnsErrorWhenFailed(t *testing.T) { + mockResponse := `{ + "sub_users": [], + "total": 0, + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/list?page=5&page_size=25": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUsers, err := 
service.ListSubUsers(vpcId, s3ServiceId, 5, 25) + assert.NotNil(t, subUsers) + assert.NotNil(t, err) + assert.Equal(t, 0, subUsers.Total) +} + +func TestGetDetailSubUser_ReturnOkWhenSuccess(t *testing.T) { + mockResponse := ` + { + "user_id": "sgn-replicate123123", + "active": true, + "role": "SubUserReadWrite", + "access_keys": [] + } + ` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/sub_user_id": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUserId := "sub_user_id" + subUser := service.DetailSubUser(vpcId, s3ServiceId, subUserId) + assert.NotNil(t, subUser) + assert.Equal(t, "sgn-replicate123123", subUser.UserID) + assert.Equal(t, true, subUser.Active) + assert.Equal(t, "SubUserReadWrite", subUser.Role) +} + +func TestCreateSubUserAccessKey_ReturnsAccessKeyWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + "credential": { + "accessKey": "example_access_key", + "secretKey": "example_secret_key" + } + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/sub_user_id/credentials/create": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUserId := "sub_user_id" + accessKey := service.CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId) + fmt.Println("AccessKey: ", accessKey) + assert.NotNil(t, accessKey) + assert.Equal(t, "example_access_key", accessKey.Credential.AccessKey) + assert.Equal(t, "example_secret_key", accessKey.Credential.SecretKey) + assert.Equal(t, true, accessKey.Status) +} + +func TestCreateSubUserAccessKey_ReturnsErrorWhenFailed(t *testing.T) { + mockResponse := `{ + "status": false, + }` + mockClient, server, _ := 
common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/sub_user_id/credentials/create": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUserId := "sub_user_id" + accessKey := service.CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId) + fmt.Println("AccessKey: ", accessKey) + assert.NotNil(t, accessKey) + assert.Equal(t, "", accessKey.Credential.AccessKey) + assert.Equal(t, "", accessKey.Credential.SecretKey) + assert.Equal(t, false, accessKey.Status) +} + +func TestDeleteSubUserAccessKey_ReturnOkWhenSuccess(t *testing.T) { + +} \ No newline at end of file diff --git a/fptcloud/object-storage/resource_access_key.go b/fptcloud/object-storage/resource_access_key.go index 8914685..5811dfd 100644 --- a/fptcloud/object-storage/resource_access_key.go +++ b/fptcloud/object-storage/resource_access_key.go @@ -155,10 +155,10 @@ func resourceAccessKeyDelete(ctx context.Context, d *schema.ResourceData, m inte return diag.Errorf("access_key_id is required for deletion") } - err := service.DeleteAccessKey(vpcId, s3ServiceDetail.S3ServiceId, accessKeyId) - if err != nil { - log.Printf("[ERROR] Failed to delete access key %s: %v", accessKeyId, err) - return diag.FromErr(err) + data := service.DeleteAccessKey(vpcId, s3ServiceDetail.S3ServiceId, accessKeyId) + if !data.Status { + log.Printf("[ERROR] Failed to delete access key %s: %v", accessKeyId, data.Message) + return diag.Errorf(data.Message) } if err := d.Set("status", true); err != nil { d.SetId("") From e48f4709b8a90f8dacba9f26214dfddda3474aeb Mon Sep 17 00:00:00 2001 From: hoanglm Date: Wed, 27 Nov 2024 01:35:07 +0700 Subject: [PATCH 7/8] [Object Storage] update: fix linting, unittest bucket config --- commons/client.go | 2 - .../object-storage/model_object_storage.go | 262 +++++ ...t_storage.go => object_storage_service.go} | 266 +---- 
.../object_storage_service_test.go | 1027 +++++++++++++++++ .../object-storage/resource_access_key.go | 10 +- 5 files changed, 1302 insertions(+), 265 deletions(-) create mode 100644 fptcloud/object-storage/model_object_storage.go rename fptcloud/object-storage/{datasource_object_storage.go => object_storage_service.go} (64%) create mode 100644 fptcloud/object-storage/object_storage_service_test.go diff --git a/commons/client.go b/commons/client.go index aaf6ed4..a435cf8 100644 --- a/commons/client.go +++ b/commons/client.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "log" "net/http" "net/http/httptest" "net/url" @@ -113,7 +112,6 @@ func (c *Client) SendRequest(req *http.Request) ([]byte, error) { body, err := io.ReadAll(resp.Body) c.LastJSONResponse = string(body) - log.Printf("[DEBUG] Response: %s - URL: %s", c.LastJSONResponse, resp.Request.URL.String()) if resp.StatusCode >= 300 { return nil, HTTPError{Code: resp.StatusCode, Status: resp.Status, Reason: string(body)} diff --git a/fptcloud/object-storage/model_object_storage.go b/fptcloud/object-storage/model_object_storage.go new file mode 100644 index 0000000..1566c16 --- /dev/null +++ b/fptcloud/object-storage/model_object_storage.go @@ -0,0 +1,262 @@ +package fptcloud_object_storage + +type AbortIncompleteMultipartUpload struct { + DaysAfterInitiation int `json:"DaysAfterInitiation"` +} + +type AccessKey struct { + Credentials []struct { + ID string `json:"id"` + Credentials []struct { + AccessKey string `json:"accessKey"` + Active bool `json:"active"` + CreatedDate interface{} `json:"createdDate,omitempty"` + } `json:"credentials"` + } `json:"credentials"` +} + +type BucketAclRequest struct { + CannedAcl string `json:"cannedAcl"` + ApplyObjects bool `json:"applyObjects"` +} + +type BucketAclResponse struct { + Status bool `json:"status"` + Owner struct { + DisplayName string `json:"DisplayName"` + ID string `json:"ID"` + } `json:"Owner"` + Grants []struct { + Grantee struct { + DisplayName string 
`json:"DisplayName"` + ID string `json:"ID"` + Type string `json:"Type"` + } `json:"Grantee"` + Permission string `json:"Permission"` + } `json:"Grants"` + CannedACL string `json:"CannedACL"` +} + +type BucketCors struct { + CorsRules []CorsRule `json:"CORSRules"` +} + +type BucketCorsResponse struct { + Status bool `json:"status"` + CorsRules []struct { + ID string `json:"ID"` + AllowedHeaders []string `json:"AllowedHeaders,omitempty"` + AllowedMethods []string `json:"AllowedMethods"` + AllowedOrigins []string `json:"AllowedOrigins"` + ExposeHeaders []string `json:"ExposeHeaders,omitempty"` + MaxAgeSeconds int `json:"MaxAgeSeconds"` + } `json:"cors_rules"` + Total int `json:"total"` +} + +type BucketLifecycleResponse struct { + Status bool `json:"status"` + Rules []struct { + Expiration struct { + ExpiredObjectDeleteMarker bool `json:"ExpiredObjectDeleteMarker,omitempty"` + Days int `json:"Days,omitempty"` + } `json:"Expiration"` + ID string `json:"ID"` + Filter struct { + Prefix string `json:"Prefix"` + } `json:"Filter,omitempty"` + Status string `json:"Status"` + NoncurrentVersionExpiration struct { + NoncurrentDays int `json:"NoncurrentDays"` + } `json:"NoncurrentVersionExpiration"` + AbortIncompleteMultipartUpload struct { + DaysAfterInitiation int `json:"DaysAfterInitiation"` + } `json:"AbortIncompleteMultipartUpload"` + Prefix string `json:"Prefix,omitempty"` + } `json:"rules"` + Total int `json:"total"` +} + +type BucketPolicyRequest struct { + Policy string `json:"policy"` +} + +type BucketPolicyResponse struct { + Status bool `json:"status"` + Policy string `json:"policy"` +} + +type BucketRequest struct { + Name string `json:"name"` + Versioning string `json:"versioning,omitempty"` + Acl string `json:"acl"` +} + +type BucketVersioningRequest struct { + Status string `json:"status"` // "Enabled" or "Suspended" +} + +type BucketVersioningResponse struct { + Status bool `json:"status"` + Config string `json:"config"` // "Enabled" or "Suspended" +} + +type 
BucketWebsiteRequest struct { + Key string `json:"key"` + Suffix string `json:"suffix"` + Bucket string `json:"bucket"` +} + +type BucketWebsiteResponse struct { + Status bool `json:"status"` + Config struct { + ResponseMetadata struct { + RequestID string `json:"RequestId"` + HostID string `json:"HostId"` + HTTPStatusCode int `json:"HTTPStatusCode"` + HTTPHeaders struct { + XAmzRequestID string `json:"x-amz-request-id"` + ContentType string `json:"content-type"` + ContentLength string `json:"content-length"` + Date string `json:"date"` + } `json:"HTTPHeaders"` + RetryAttempts int `json:"RetryAttempts"` + } `json:"ResponseMetadata"` + IndexDocument struct { + Suffix string `json:"Suffix"` + } `json:"IndexDocument"` + ErrorDocument struct { + Key string `json:"Key"` + } `json:"ErrorDocument"` + } `json:"config,omitempty"` +} + +type CommonResponse struct { + Status bool `json:"status"` + Message string `json:"message,omitempty"` +} + +type CorsRule struct { + ID string `json:"ID,omitempty"` + AllowedOrigins []string `json:"AllowedOrigins"` + AllowedMethods []string `json:"AllowedMethods"` + ExposeHeaders []string `json:"ExposeHeaders,omitempty"` + AllowedHeaders []string `json:"AllowedHeaders,omitempty"` + MaxAgeSeconds int `json:"MaxAgeSeconds"` +} + +type CreateAccessKeyResponse struct { + Status bool `json:"status"` + Message string `json:"message,omitempty"` + Credential struct { + AccessKey string `json:"accessKey"` + SecretKey string `json:"secretKey"` + Active interface{} `json:"active"` + CreatedDate interface{} `json:"createdDate,omitempty"` + } `json:"credential,omitempty"` +} + +type DetailSubUser struct { + UserID string `json:"user_id"` + Arn interface{} `json:"arn,omitempty"` + Active bool `json:"active"` + Role string `json:"role"` + CreatedAt interface{} `json:"created_at,omitempty"` + AccessKeys []string `json:"access_keys"` +} + +type Expiration struct { + Days int `json:"Days,omitempty"` + ExpiredObjectDeleteMarker bool 
`json:"ExpiredObjectDeleteMarker,omitempty"` +} + +type Filter struct { + Prefix string `json:"Prefix"` +} + +type ListBucketResponse struct { + Buckets []struct { + Name string `json:"Name"` + CreationDate string `json:"CreationDate"` + IsEmpty bool `json:"isEmpty"` + S3ServiceID string `json:"s3_service_id"` + IsEnabledLogging bool `json:"isEnabledLogging"` + Endpoint string `json:"endpoint"` + } `json:"buckets"` + Total int `json:"total"` +} + +type NoncurrentVersionExpiration struct { + NoncurrentDays int `json:"NoncurrentDays"` +} + +type PutBucketAclResponse struct { + Status bool `json:"status"` + TaskID string `json:"taskId"` +} + +type S3BucketLifecycleConfig struct { + ID string `json:"ID"` + Filter Filter `json:"Filter"` + Expiration Expiration `json:"Expiration"` + NoncurrentVersionExpiration NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration"` + AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload"` +} + +type S3ServiceEnableResponse struct { + Data []struct { + S3ServiceName string `json:"s3_service_name"` + S3ServiceID string `json:"s3_service_id"` + S3Platform string `json:"s3_platform"` + DefaultUser interface{} `json:"default_user,omitempty"` + MigrateQuota int `json:"migrate_quota"` + SyncQuota int `json:"sync_quota"` + RgwTotalNodes int `json:"rgw_total_nodes,omitempty"` + RgwUserActiveNodes int `json:"rgw_user_active_nodes,omitempty"` + HasUnusualConfig interface{} `json:"has_unusual_config,omitempty"` + } `json:"data"` + Total int `json:"total"` +} + +type Statement struct { + Sid string `json:"Sid"` + Effect string `json:"Effect"` + Principal map[string]interface{} `json:"Principal"` + Action []string `json:"Action"` + Resource []string `json:"Resource"` +} + +type SubUser struct { + Role string `json:"role"` + UserId string `json:"user_id"` +} + +type SubUserCreateKeyResponse struct { + Status bool `json:"status"` + Message string `json:"message,omitempty"` + Credential struct { + 
AccessKey string `json:"accessKey,omitempty"` + SecretKey string `json:"secretKey,omitempty"` + Active interface{} `json:"active,omitempty"` + CreatedDate interface{} `json:"createdDate,omitempty"` + } `json:"credential,omitempty"` +} + +type SubUserCreateRequest struct { + Username string `json:"username"` + DisplayName string `json:"display_name"` + Email string `json:"email"` + Permissions []string `json:"permissions"` +} + +type SubUserListResponse struct { + SubUsers []struct { + UserID string `json:"user_id"` + Arn string `json:"arn"` + Active bool `json:"active"` + Role string `json:"role"` + CreatedAt interface{} `json:"created_at,omitempty"` + AccessKeys interface{} `json:"access_keys,omitempty"` + } `json:"sub_users"` + Total int `json:"total"` +} diff --git a/fptcloud/object-storage/datasource_object_storage.go b/fptcloud/object-storage/object_storage_service.go similarity index 64% rename from fptcloud/object-storage/datasource_object_storage.go rename to fptcloud/object-storage/object_storage_service.go index 187c249..984d0c9 100644 --- a/fptcloud/object-storage/datasource_object_storage.go +++ b/fptcloud/object-storage/object_storage_service.go @@ -6,254 +6,6 @@ import ( common "terraform-provider-fptcloud/commons" ) -// SubUserCreateRequest represents the request body for creating a sub-user -type SubUserCreateRequest struct { - Username string `json:"username"` - DisplayName string `json:"display_name"` - Email string `json:"email"` - Permissions []string `json:"permissions"` -} -type AccessKey struct { - Credentials []struct { - ID string `json:"id"` - Credentials []struct { - AccessKey string `json:"accessKey"` - Active bool `json:"active"` - CreatedDate interface{} `json:"createdDate"` - } `json:"credentials"` - } `json:"credentials"` -} -type CreateAccessKeyResponse struct { - Status bool `json:"status"` - Message string `json:"message,omitempty"` - Credential struct { - AccessKey string `json:"accessKey"` - SecretKey string `json:"secretKey"` - 
Active interface{} `json:"active"` - CreatedDate interface{} `json:"createdDate"` - } `json:"credential,omitempty"` -} -type SubUserCreateKeyResponse struct { - Status bool `json:"status"` - Message string `json:"message,omitempty"` - Credential struct { - AccessKey string `json:"accessKey,omitempty"` - SecretKey string `json:"secretKey,omitempty"` - Active interface{} `json:"active,omitempty"` - CreatedDate interface{} `json:"createdDate,omitempty"` - } `json:"credential,omitempty"` -} - -type SubUser struct { - Role string `json:"role"` - UserId string `json:"user_id"` -} -type SubUserListResponse struct { - SubUsers []struct { - UserID string `json:"user_id"` - Arn string `json:"arn"` - Active bool `json:"active"` - Role string `json:"role"` - CreatedAt interface{} `json:"created_at"` - AccessKeys interface{} `json:"access_keys"` - } `json:"sub_users"` - Total int `json:"total"` -} -type CommonResponse struct { - Status bool `json:"status"` - Message string `json:"message,omitempty"` -} -type CorsRule struct { - ID string `json:"ID,omitempty"` - AllowedOrigins []string `json:"AllowedOrigins"` - AllowedMethods []string `json:"AllowedMethods"` - ExposeHeaders []string `json:"ExposeHeaders,omitempty"` - AllowedHeaders []string `json:"AllowedHeaders,omitempty"` - MaxAgeSeconds int `json:"MaxAgeSeconds"` -} -type BucketCors struct { - CorsRules []CorsRule `json:"CORSRules"` -} -type BucketCorsResponse struct { - Status bool `json:"status"` - CorsRules []struct { - ID string `json:"ID"` - AllowedHeaders []string `json:"AllowedHeaders,omitempty"` - AllowedMethods []string `json:"AllowedMethods"` - AllowedOrigins []string `json:"AllowedOrigins"` - ExposeHeaders []string `json:"ExposeHeaders,omitempty"` - MaxAgeSeconds int `json:"MaxAgeSeconds"` - } `json:"cors_rules"` - Total int `json:"total"` -} - -type BucketPolicyResponse struct { - Status bool `json:"status"` - Policy string `json:"policy"` -} -type BucketPolicyRequest struct { - Policy string `json:"policy"` -} 
-type Statement struct { - Sid string `json:"Sid"` - Effect string `json:"Effect"` - Principal map[string]interface{} `json:"Principal"` - Action []string `json:"Action"` - Resource []string `json:"Resource"` -} - -type BucketVersioningRequest struct { - Status string `json:"status"` // "Enabled" or "Suspended" -} -type BucketVersioningResponse struct { - Status bool `json:"status"` - Config string `json:"config"` // "Enabled" or "Suspended" -} - -type BucketAclResponse struct { - Status bool `json:"status"` - Owner struct { - DisplayName string `json:"DisplayName"` - ID string `json:"ID"` - } `json:"Owner"` - Grants []struct { - Grantee struct { - DisplayName string `json:"DisplayName"` - ID string `json:"ID"` - Type string `json:"Type"` - } `json:"Grantee"` - Permission string `json:"Permission"` - } `json:"Grants"` - CannedACL string `json:"CannedACL"` -} -type BucketAclRequest struct { - CannedAcl string `json:"cannedAcl"` - ApplyObjects bool `json:"applyObjects"` -} -type PutBucketAclResponse struct { - Status bool `json:"status"` - // TaskID may be empty if applyObjects is false, if applyObjects is true, the taskID will be returned - TaskID string `json:"taskId"` -} -type BucketWebsiteRequest struct { - Key string `json:"key"` - Suffix string `json:"suffix"` - Bucket string `json:"bucket"` -} -type BucketWebsiteResponse struct { - Status bool `json:"status"` - Config struct { - ResponseMetadata struct { - RequestID string `json:"RequestId"` - HostID string `json:"HostId"` - HTTPStatusCode int `json:"HTTPStatusCode"` - HTTPHeaders struct { - XAmzRequestID string `json:"x-amz-request-id"` - ContentType string `json:"content-type"` - ContentLength string `json:"content-length"` - Date string `json:"date"` - } `json:"HTTPHeaders"` - RetryAttempts int `json:"RetryAttempts"` - } `json:"ResponseMetadata"` - IndexDocument struct { - Suffix string `json:"Suffix"` - } `json:"IndexDocument"` - ErrorDocument struct { - Key string `json:"Key"` - } `json:"ErrorDocument"` - 
} `json:"config,omitempty"` -} - -type S3ServiceEnableResponse struct { - Data []struct { - S3ServiceName string `json:"s3_service_name"` - S3ServiceID string `json:"s3_service_id"` - S3Platform string `json:"s3_platform"` - DefaultUser interface{} `json:"default_user"` - MigrateQuota int `json:"migrate_quota"` - SyncQuota int `json:"sync_quota"` - RgwTotalNodes int `json:"rgw_total_nodes"` - RgwUserActiveNodes int `json:"rgw_user_active_nodes"` - HasUnusualConfig interface{} `json:"has_unusual_config"` - } `json:"data"` - Total int `json:"total"` -} - -// Bucket represents the response structure for a created bucket -type BucketRequest struct { - Name string `json:"name"` - Region string `json:"region"` - Versioning string `json:"versioning"` - Acl string `json:"acl"` -} -type ListBucketResponse struct { - Buckets []struct { - Name string `json:"Name"` - CreationDate string `json:"CreationDate"` - IsEmpty bool `json:"isEmpty"` - S3ServiceID string `json:"s3_service_id"` - IsEnabledLogging bool `json:"isEnabledLogging"` - Endpoint string `json:"endpoint"` - } `json:"buckets"` - Total int `json:"total"` -} -type BucketLifecycleResponse struct { - Status bool `json:"status"` - Rules []struct { - Expiration struct { - ExpiredObjectDeleteMarker bool `json:"ExpiredObjectDeleteMarker,omitempty"` - Days int `json:"Days,omitempty"` - } `json:"Expiration"` - ID string `json:"ID"` - Filter struct { - Prefix string `json:"Prefix"` - } `json:"Filter,omitempty"` - Status string `json:"Status"` - NoncurrentVersionExpiration struct { - NoncurrentDays int `json:"NoncurrentDays"` - } `json:"NoncurrentVersionExpiration"` - AbortIncompleteMultipartUpload struct { - DaysAfterInitiation int `json:"DaysAfterInitiation"` - } `json:"AbortIncompleteMultipartUpload"` - Prefix string `json:"Prefix,omitempty"` - } `json:"rules"` - Total int `json:"total"` -} - -type DetailSubUser struct { - UserID string `json:"user_id"` - Arn interface{} `json:"arn"` - Active bool `json:"active"` - Role 
string `json:"role"` - CreatedAt interface{} `json:"created_at"` - AccessKeys []string `json:"access_keys"` -} - -type S3BucketLifecycleConfig struct { - ID string `json:"ID"` - Filter Filter `json:"Filter"` - Expiration Expiration `json:"Expiration"` - NoncurrentVersionExpiration NoncurrentVersionExpiration `json:"NoncurrentVersionExpiration"` - AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `json:"AbortIncompleteMultipartUpload"` -} - -type Filter struct { - Prefix string `json:"Prefix"` -} - -type Expiration struct { - Days int `json:"Days,omitempty"` - ExpiredObjectDeleteMarker bool `json:"ExpiredObjectDeleteMarker,omitempty"` -} - -type NoncurrentVersionExpiration struct { - NoncurrentDays int `json:"NoncurrentDays"` -} - -type AbortIncompleteMultipartUpload struct { - DaysAfterInitiation int `json:"DaysAfterInitiation"` -} - // ObjectStorageService defines the interface for object storage operations type ObjectStorageService interface { CheckServiceEnable(vpcId string) S3ServiceEnableResponse @@ -265,7 +17,7 @@ type ObjectStorageService interface { // Access key ListAccessKeys(vpcId, s3ServiceId string) (AccessKey, error) - DeleteAccessKey(vpcId, s3ServiceId, accessKeyId string) error + DeleteAccessKey(vpcId, s3ServiceId, accessKeyId string) CommonResponse CreateAccessKey(vpcId, s3ServiceId string) *CreateAccessKeyResponse // Sub user @@ -318,12 +70,12 @@ func (s *ObjectStorageServiceImpl) CheckServiceEnable(vpcId string) S3ServiceEna apiPath := common.ApiPath.CheckS3ServiceEnable(vpcId) resp, err := s.client.SendGetRequest(apiPath) if err != nil { - return S3ServiceEnableResponse{} + return S3ServiceEnableResponse{Total: 0} } var response S3ServiceEnableResponse if err := json.Unmarshal(resp, &response); err != nil { - return S3ServiceEnableResponse{} + return S3ServiceEnableResponse{Total: 0} } return response } @@ -336,13 +88,13 @@ func (s *ObjectStorageServiceImpl) CreateBucket(req BucketRequest, vpcId, s3Serv return CommonResponse{Status: 
false, Message: err.Error()} } - var bucket BucketRequest + var bucket CommonResponse err = json.Unmarshal(resp, &bucket) if err != nil { return CommonResponse{Status: false, Message: err.Error()} } - return CommonResponse{Status: true, Message: "Bucket created successfully"} + return CommonResponse{Status: bucket.Status, Message: bucket.Message} } // CreateSubUser creates a new sub-user @@ -358,7 +110,6 @@ func (s *ObjectStorageServiceImpl) CreateSubUser(req SubUser, vpcId, s3ServiceId if err != nil { return &CommonResponse{Status: false, Message: err.Error()} } - return &CommonResponse{Status: subUser.Status, Message: "Sub-user created successfully"} } @@ -437,13 +188,14 @@ func (s *ObjectStorageServiceImpl) DeleteBucket(vpcId, s3ServiceId, bucketName s return CommonResponse{Status: true, Message: "Bucket deleted successfully"} } -func (s *ObjectStorageServiceImpl) DeleteAccessKey(vpcId, s3ServiceId, accessKeyId string) error { +func (s *ObjectStorageServiceImpl) DeleteAccessKey(vpcId, s3ServiceId, accessKeyId string) CommonResponse { apiPath := common.ApiPath.DeleteAccessKey(vpcId, s3ServiceId) body := map[string]string{"accessKey": accessKeyId} + if _, err := s.client.SendDeleteRequestWithBody(apiPath, body); err != nil { - return fmt.Errorf("failed to delete access key: %v", err) + return CommonResponse{Status: false, Message: err.Error()} } - return nil + return CommonResponse{Status: true, Message: "Access key deleted successfully"} } // Implement bucket policy methods diff --git a/fptcloud/object-storage/object_storage_service_test.go b/fptcloud/object-storage/object_storage_service_test.go new file mode 100644 index 0000000..8faf773 --- /dev/null +++ b/fptcloud/object-storage/object_storage_service_test.go @@ -0,0 +1,1027 @@ +package fptcloud_object_storage_test + +import ( + "testing" + + common "terraform-provider-fptcloud/commons" + fptcloud_object_storage "terraform-provider-fptcloud/fptcloud/object-storage" + + "github.com/stretchr/testify/assert" +) 
+ +func TestCreateResourceAccessKey_ReturnsResourceAccessKeyIDWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + "message": "Create resource access key successfully", + "credential": { + "accessKey": "11111111-aaaa-1111-bbbb-111111111111", + "secretKey": "22222222-bbbb-2222-cccc-222222222222" + } + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/user/credentials": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + resourceAccessKeyID := service.CreateAccessKey(vpcId, s3ServiceId) + assert.NotNil(t, resourceAccessKeyID) + + assert.Equal(t, "11111111-aaaa-1111-bbbb-111111111111", resourceAccessKeyID.Credential.AccessKey) + assert.Equal(t, "22222222-bbbb-2222-cccc-222222222222", resourceAccessKeyID.Credential.SecretKey) + assert.Equal(t, true, resourceAccessKeyID.Status) + assert.Equal(t, "Create resource access key successfully", resourceAccessKeyID.Message) +} + +func TestCreateResourceAccessKey_ReturnsErrorWhenFailed(t *testing.T) { + mockResponse := `{ + "status": false, + "message": "Failed to create resource access key", + "credential": {} + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/user/credentials": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + resourceAccessKeyID := service.CreateAccessKey(vpcId, s3ServiceId) + assert.NotNil(t, resourceAccessKeyID) + + assert.Equal(t, "", resourceAccessKeyID.Credential.AccessKey) + assert.Equal(t, "", resourceAccessKeyID.Credential.SecretKey) + assert.Equal(t, false, resourceAccessKeyID.Status) + assert.Equal(t, "Failed to create resource access key", resourceAccessKeyID.Message) +} + +func 
TestDeleteResouurceAccessKey_ReturnOkWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + "message": "Delete resource access key successfully" + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/user/credentials/credential_id": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + credentialId := "credential_id" + res := service.DeleteAccessKey(vpcId, s3ServiceId, credentialId) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) + assert.Equal(t, "Access key deleted successfully", res.Message) +} + +func TestListAccessKeys_ReturnAccessKeysWhenSuccess(t *testing.T) { + mockResponse := `{ + "credentials": [ + { + "id": "credential_id", + "credentials": [ + { + "accessKey": "11111111-aaaa-1111-bbbb-111111111111", + "active": true + } + ] + } + ] + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/user/credentials?s3_service_id=s3_service_id": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + accessKeys, _ := service.ListAccessKeys(vpcId, s3ServiceId) + assert.NotNil(t, accessKeys) + assert.Equal(t, "credential_id", accessKeys.Credentials[0].ID) + assert.Equal(t, "11111111-aaaa-1111-bbbb-111111111111", accessKeys.Credentials[0].Credentials[0].AccessKey) + assert.Equal(t, true, accessKeys.Credentials[0].Credentials[0].Active) +} + +func TestCreateBucket_ReturnsBucketIDWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + "message": "Create bucket successfully" + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket": mockResponse, + }) + defer server.Close() + service := 
fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + bucketRequest := fptcloud_object_storage.BucketRequest{ + Name: "bucket_name", + Acl: "private", + } + r := service.CreateBucket(bucketRequest, vpcId, s3ServiceId) + assert.NotNil(t, r) + assert.Equal(t, true, r.Status) +} + +func TestCreateBucket_ReturnsErrorWhenFailed(t *testing.T) { + mockResponse := `{ + "status": false, + "message": "Failed to create bucket", + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + bucketRequest := fptcloud_object_storage.BucketRequest{ + Name: "bucket_name", + Acl: "private", + } + r := service.CreateBucket(bucketRequest, vpcId, s3ServiceId) + assert.NotNil(t, r) + assert.Equal(t, false, r.Status) +} + +func TestListBuckets_ReturnsBucketsWhenSuccess(t *testing.T) { + mockResponse := `{ + "buckets": [ + { + "Name": "bucket_name", + "CreationDate": "2024-11-26T16:43:55.121000+00:00", + "isEmpty": false, + "s3_service_id": "s3_service_id", + "isEnabledLogging": false, + "endpoint": "https://xxxx-xxx.xyz.com" + } + ], + "total": 1 + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/buckets?page=5&page_size=10&s3_service_id=s3_service_id": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + buckets := service.ListBuckets(vpcId, s3ServiceId, 5, 10) + assert.NotNil(t, buckets) + assert.Equal(t, "bucket_name", buckets.Buckets[0].Name) + assert.Equal(t, "2024-11-26T16:43:55.121000+00:00", buckets.Buckets[0].CreationDate) + assert.Equal(t, false, buckets.Buckets[0].IsEmpty) + assert.Equal(t, "s3_service_id", 
buckets.Buckets[0].S3ServiceID) + assert.Equal(t, false, buckets.Buckets[0].IsEnabledLogging) + assert.Equal(t, "https://xxxx-xxx.xyz.com", buckets.Buckets[0].Endpoint) +} + +func TestListBuckets_ReturnsErrorWhenFailed(t *testing.T) { + mockResponse := `{ + "buckets": [], + "total": 0 + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/buckets?page=5&page_size=10&s3_service_id=s3_service_id": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + buckets := service.ListBuckets(vpcId, s3ServiceId, 5, 10) + assert.NotNil(t, buckets) + assert.Equal(t, 0, buckets.Total) +} + +func TestDeleteBucket_ReturnsOkWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + bucketName := "bucket_name" + res := service.DeleteBucket(vpcId, s3ServiceId, bucketName) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) +} + +func TestCreateSubUser_ReturnsTrueWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + "message": "Sub-user created successfully" + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/create": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUserRequest := fptcloud_object_storage.SubUser{ + Role: "admin", + UserId: "user_id", + } + r := service.CreateSubUser(subUserRequest, vpcId, s3ServiceId) + assert.NotNil(t, r) + assert.Equal(t, true, r.Status) + assert.Equal(t, "Sub-user created 
successfully", r.Message) +} + +func TestCreateSubUser_ReturnsFalseWhenFailed(t *testing.T) { + mockResponse := `{ + "status": false + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/wrong_endpoint": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUserRequest := fptcloud_object_storage.SubUser{ + Role: "admin", + UserId: "user_id", + } + r := service.CreateSubUser(subUserRequest, vpcId, s3ServiceId) + assert.NotNil(t, r) + assert.Equal(t, false, r.Status) +} + +func TestDeleteSubUser_ReturnOkWhenSuccess(t *testing.T) { + mockResponse := `{}` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/sub_user_id/delete": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUserId := "sub_user_id" + err := service.DeleteSubUser(vpcId, s3ServiceId, subUserId) + assert.Nil(t, err) +} + +func TestListSubUsers_ReturnsSubUsersWhenSuccess(t *testing.T) { + mockResponse := `{ + "sub_users": [ + { + "user_id": "sgn-replicate123123", + "arn": "arn:aws:iam:::user/xxx:sgn-replicate123123", + "active": true, + "role": "SubUserReadWrite" + } + ], + "total": 1 + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/list?page=5&page_size=25": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUsers, err := service.ListSubUsers(vpcId, s3ServiceId, 5, 25) + assert.NotNil(t, subUsers) + assert.Nil(t, err) + assert.Equal(t, 1, subUsers.Total) + assert.Equal(t, "sgn-replicate123123", 
subUsers.SubUsers[0].UserID) + assert.Equal(t, "arn:aws:iam:::user/xxx:sgn-replicate123123", subUsers.SubUsers[0].Arn) + assert.Equal(t, true, subUsers.SubUsers[0].Active) + assert.Equal(t, "SubUserReadWrite", subUsers.SubUsers[0].Role) +} + +func TestListSubUsers_ReturnsErrorWhenFailed(t *testing.T) { + mockResponse := `{ + "sub_users": [], + "total": 0, + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/list?page=5&page_size=25": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUsers, err := service.ListSubUsers(vpcId, s3ServiceId, 5, 25) + assert.NotNil(t, subUsers) + assert.NotNil(t, err) + assert.Equal(t, 0, subUsers.Total) +} + +func TestGetDetailSubUser_ReturnOkWhenSuccess(t *testing.T) { + mockResponse := ` + { + "user_id": "sgn-replicate123123", + "active": true, + "role": "SubUserReadWrite", + "access_keys": [] + } + ` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/sub_user_id": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUserId := "sub_user_id" + subUser := service.DetailSubUser(vpcId, s3ServiceId, subUserId) + assert.NotNil(t, subUser) + assert.Equal(t, "sgn-replicate123123", subUser.UserID) + assert.Equal(t, true, subUser.Active) + assert.Equal(t, "SubUserReadWrite", subUser.Role) +} + +func TestCreateSubUserAccessKey_ReturnsAccessKeyWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + "credential": { + "accessKey": "example_access_key", + "secretKey": "example_secret_key" + } + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + 
"/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/sub_user_id/credentials/create": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUserId := "sub_user_id" + accessKey := service.CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId) + assert.NotNil(t, accessKey) + assert.Equal(t, "example_access_key", accessKey.Credential.AccessKey) + assert.Equal(t, "example_secret_key", accessKey.Credential.SecretKey) + assert.Equal(t, true, accessKey.Status) +} + +func TestCreateSubUserAccessKey_ReturnsErrorWhenFailed(t *testing.T) { + mockResponse := `{ + "status": false, + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/sub_user_id/credentials/create": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUserId := "sub_user_id" + accessKey := service.CreateSubUserAccessKey(vpcId, s3ServiceId, subUserId) + assert.NotNil(t, accessKey) + assert.Equal(t, "", accessKey.Credential.AccessKey) + assert.Equal(t, "", accessKey.Credential.SecretKey) + assert.Equal(t, false, accessKey.Status) +} + +func TestDeleteSubUserAccessKey_ReturnOkWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/sub_user_id/credentials/delete": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + subUserId := "sub_user_id" + accessKeyId := "access_key_id" + res := service.DeleteSubUserAccessKey(vpcId, s3ServiceId, subUserId, accessKeyId) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) +} + +func 
TestPutBucketPolicy_ReturnOkWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/put-policy": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + bucketName := "bucket_name" + policy := map[string]interface { + }{"Version": "2012-10-17"} + res := service.PutBucketPolicy(vpcId, s3ServiceId, bucketName, policy) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) +} + +func TestGetBucketPolicy_ReturnsPolicyWhenSuccess(t *testing.T) { + mockResponse := `{ + "policy": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"AllowAllS3Actions\",\"Effect\":\"Allow\",\"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::bucket_name/*\"}]}", + "status": true + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/get-policy": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + bucketName := "bucket_name" + policy := service.GetBucketPolicy(vpcId, s3ServiceId, bucketName) + assert.NotNil(t, policy) + assert.Equal(t, "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"AllowAllS3Actions\",\"Effect\":\"Allow\",\"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::bucket_name/*\"}]}", policy.Policy) + assert.Equal(t, true, policy.Status) +} + +func TestGetBucketPolicy_ReturnsFalseWhenFailed(t *testing.T) { + mockResponse := `{ + "policy": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"AllowAllS3Actions\",\"Effect\":\"Allow\",\"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::bucket_name/*\"}]}", + "status": false, + }` + mockClient, server, _ := 
common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/get-policy": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + s3ServiceId := "s3_service_id" + bucketName := "bucket_name" + policy := service.GetBucketPolicy(vpcId, s3ServiceId, bucketName) + assert.NotNil(t, policy) + assert.Equal(t, false, policy.Status) +} + +func TestCreateBucketCors_ReturnOkWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/create-bucket-cors": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + cors := map[string]interface{}{ + "AllowedHeaders": []string{"*"}, + } + res := service.CreateBucketCors("vpc_id", "s3_service_id", bucketName, cors) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) +} + +func TestUpdateBucketCors_ReturnOkWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/create-bucket-cors": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + cors := map[string]interface{}{ + "AllowedHeaders": []string{"*"}, + } + arrCors := append([]map[string]interface{}{}, cors) + res := service.UpdateBucketCors("vpc_id", "s3_service_id", bucketName, arrCors) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) +} + +func TestGetBucketCors_ReturnCorsWhenSuccess(t *testing.T) { + mockResponse := `{ + "cors_rules": [ + { + "AllowedHeaders": [ + "*" + ] + } + ], + "status": true, + "total": 1 + }` + mockClient, server, _ := 
common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/cors?page=5&page_size=25": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + cors, err := service.GetBucketCors("vpc_id", "s3_service_id", bucketName, 5, 25) + assert.NotNil(t, cors) + assert.Nil(t, err) + assert.Equal(t, true, cors.Status) + assert.Equal(t, "*", cors.CorsRules[0].AllowedHeaders[0]) +} + +func TestGetBucketCors_ReturnFalseWhenFailed(t *testing.T) { + mockResponse := `{ + "cors_rules": [], + "status": false, + "total": 0, + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/cors?page=5&page_size=25": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + cors, err := service.GetBucketCors("vpc_id", "s3_service_id", bucketName, 5, 25) + assert.Nil(t, cors) + assert.NotNil(t, err) +} + +func TestPutBucketVersioning_ReturnNilWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/put-versioning": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + versioning := fptcloud_object_storage.BucketVersioningRequest{ + Status: "Enabled", + } + res := service.PutBucketVersioning("vpc_id", "s3_service_id", bucketName, versioning) + assert.Nil(t, res) +} + +func TestGetBucketVersioning_ReturnBucketVersioning(t *testing.T) { + mockResponse := `{ + "status": true, + "config": "Enabled" + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/get-versioning": 
mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + versioning := service.GetBucketVersioning("vpc_id", "s3_service_id", bucketName) + assert.NotNil(t, versioning) + assert.Equal(t, true, versioning.Status) +} + +func TestPutBucketAcl_ReturnAclWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + "taskId": "task_id" + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/acl": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + acl := fptcloud_object_storage.BucketAclRequest{ + CannedAcl: "private", + ApplyObjects: true, + } + res := service.PutBucketAcl("vpc_id", "s3_service_id", bucketName, acl) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) + assert.Equal(t, "task_id", res.TaskID) +} + +func TestGetBucketAcl_ReturnAclWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + "Owner": { + "DisplayName": "example_user_id", + "ID": "example_user_id" + }, + "Grants": [ + { + "Grantee": { + "DisplayName": "example_user_id", + "ID": "example_user_id", + "Type": "CanonicalUser" + }, + "Permission": "FULL_CONTROL" + } + ], + "CannedACL": "private" + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/acl": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + acl := service.GetBucketAcl("vpc_id", "s3_service_id", bucketName) + assert.NotNil(t, acl) + assert.Equal(t, true, acl.Status) + assert.Equal(t, "example_user_id", acl.Owner.DisplayName) + assert.Equal(t, "example_user_id", acl.Owner.ID) + assert.Equal(t, "example_user_id", acl.Grants[0].Grantee.DisplayName) + assert.Equal(t, 
"example_user_id", acl.Grants[0].Grantee.ID) + assert.Equal(t, "CanonicalUser", acl.Grants[0].Grantee.Type) + assert.Equal(t, "FULL_CONTROL", acl.Grants[0].Permission) +} + +func TestGetBucketAcl_ReturnFalseWhenFailed(t *testing.T) { + mockResponse := `{ + "status": false + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/acl-wrong-endpoint": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + acl := service.GetBucketAcl("vpc_id", "s3_service_id", bucketName) + assert.NotNil(t, acl) + assert.Equal(t, false, acl.Status) +} + +func TestGetBucketAcl_ReturnFalseWhenFailedUnmarshalJson(t *testing.T) { + mockResponse := `{ + "status": false,,, + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/acl-wrong-endpoint": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + acl := service.GetBucketAcl("vpc_id", "s3_service_id", bucketName) + assert.NotNil(t, acl) + assert.Equal(t, false, acl.Status) +} + +func TestPutBucketWebsite_ReturnOkWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/put-config": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + website := fptcloud_object_storage.BucketWebsiteRequest{ + Key: "index.html", + Suffix: "index2.html", + Bucket: "bucket_name", + } + res := service.PutBucketWebsite("vpc_id", "s3_service_id", bucketName, website) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) +} + +func 
TestPutBucketWebsite_ReturnFalseWhenFailed(t *testing.T) { + mockResponse := `{ + "status": false, + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/put-config": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + website := fptcloud_object_storage.BucketWebsiteRequest{ + Key: "index.html", + Suffix: "index2.html", + Bucket: "bucket_name", + } + res := service.PutBucketWebsite("vpc_id", "s3_service_id", bucketName, website) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) +} + +func TestDeleteBucketStaticWebsite_ReturnTrueWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/delete-config": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + res := service.DeleteBucketStaticWebsite("vpc_id", "s3_service_id", bucketName) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) +} + +func TestDeleteBucketStaticWebsite_ReturnFalseWhenError(t *testing.T) { + mockResponse := `{ + "status": false + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/delete-config-wrong-endpoint": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + res := service.DeleteBucketStaticWebsite("vpc_id", "s3_service_id", bucketName) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) +} + +func TestGetBucketWebsite_ReturnWebsiteWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + "config": { + "ResponseMetadata": { + "RequestId": 
"tx000000976595dcbf0f8e1-006746c273-326c5-han02-1", + "HostId": "", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amz-request-id": "tx000000976595dcbf0f8e1-006746c273-326c5-han02-1", + "content-type": "application/xml", + "content-length": "241", + "date": "Wed, 27 Nov 2024 06:55:47 GMT", + "strict-transport-security": "max-age=16000000; includeSubDomains; preload;", + "access-control-allow-origin": "*" + }, + "RetryAttempts": 0 + }, + "IndexDocument": { + "Suffix": "index.html" + }, + "ErrorDocument": { + "Key": "error.html" + } + } + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/get-config": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + website := service.GetBucketWebsite("vpc_id", "s3_service_id", bucketName) + assert.NotNil(t, website) + assert.Equal(t, true, website.Status) + assert.Equal(t, "index.html", website.Config.IndexDocument.Suffix) + assert.Equal(t, "error.html", website.Config.ErrorDocument.Key) +} + +func TestGetBucketWebsite_ReturnFalseWhenError(t *testing.T) { + mockResponse := `{ + "status": false + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/get-config-wrong-endpoint": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + website := service.GetBucketWebsite("vpc_id", "s3_service_id", bucketName) + assert.NotNil(t, website) + assert.Equal(t, false, website.Status) +} + +func TestGetBucketLifecycle_ReturnRuleWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true, + "rules": [ + { + "ID": "rule_id", + "Prefix": "prefix", + "Status": "Enabled", + "Expiration": { + "Days": 30 + } + } + ], + "total": 1 + }` + mockClient, server, _ := 
common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/lifecycles?page=5&page_size=25": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + lifecycle := service.GetBucketLifecycle("vpc_id", "s3_service_id", bucketName, 5, 25) + assert.NotNil(t, lifecycle) + assert.Equal(t, true, lifecycle.Status) + assert.Equal(t, "rule_id", lifecycle.Rules[0].ID) + assert.Equal(t, "prefix", lifecycle.Rules[0].Prefix) + assert.Equal(t, "Enabled", lifecycle.Rules[0].Status) + assert.Equal(t, 30, lifecycle.Rules[0].Expiration.Days) + assert.Equal(t, 1, lifecycle.Total) +} + +func TestGetBucketLifecycle_ReturnFalseWhenFailed(t *testing.T) { + mockResponse := `{ + "status": false, + "rules": [], + "total": 0 + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/lifecycles-wrong-endpoint?page=5&page_size=25": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + lifecycle := service.GetBucketLifecycle("vpc_id", "s3_service_id", bucketName, 5, 25) + assert.NotNil(t, lifecycle) + assert.Equal(t, false, lifecycle.Status) + assert.Equal(t, 0, lifecycle.Total) +} + +func TestPutBucketLifecycle_ReturnOkWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/create-bucket-lifecycle-configuration": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + rule := map[string]interface{}{ + "ID": "rule_id", + "Prefix": "prefix", + "Status": "Enabled", + "Expiration": map[string]interface{}{ + "Days": 30, + }, + } + res := 
service.PutBucketLifecycle("vpc_id", "s3_service_id", bucketName, rule) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) +} + +func TestPutBucketLifecycle_ReturnFalseWhenError(t *testing.T) { + mockResponse := `{ + "status": false + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/create-bucket-lifecycle-configuration-wrong-endpoint": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + rule := map[string]interface{}{ + "ID": "rule_id", + "Prefix": "prefix", + "Status": "Enabled", + "Expiration": map[string]interface{}{ + "Days": 30, + }, + } + res := service.PutBucketLifecycle("vpc_id", "s3_service_id", bucketName, rule) + assert.NotNil(t, res) + assert.Equal(t, false, res.Status) +} + +func TestPutBucketLifecycle_ReturnFalseWhenErrorUnmarshalJson(t *testing.T) { + mockResponse := `{ + "status": false,,,,@#$@#$234 + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/create-bucket-lifecycle-configuration-wrong-endpoint": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + rule := map[string]interface{}{ + "ID": "rule_id", + "Prefix": "prefix", + "Status": "Enabled", + "Expiration": map[string]interface{}{ + "Days": 30, + }, + } + res := service.PutBucketLifecycle("vpc_id", "s3_service_id", bucketName, rule) + assert.NotNil(t, res) + assert.Equal(t, false, res.Status) +} + +func TestDeleteBucketLifecycle_ReturnOkWhenSuccess(t *testing.T) { + mockResponse := `{ + "status": true + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/delete-bucket-lifecycle-configuration": mockResponse, + }) + defer server.Close() + 
service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + rule := map[string]interface{}{ + "ID": "rule_id", + "Prefix": "prefix", + "Status": "Enabled", + "Expiration": map[string]interface{}{ + "Days": 30, + }, + } + res := service.DeleteBucketLifecycle("vpc_id", "s3_service_id", bucketName, rule) + assert.NotNil(t, res) + assert.Equal(t, true, res.Status) +} + +func TestDeleteBucketLifecycle_ReturnFalseWhenError(t *testing.T) { + mockResponse := `{ + "status": false + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/delete-bucket-lifecycle-configuration-wrong-endpoint": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + rule := map[string]interface{}{ + "ID": "rule_id", + "Prefix": "prefix", + "Status": "Enabled", + "Expiration": map[string]interface{}{ + "Days": 30, + }, + } + res := service.DeleteBucketLifecycle("vpc_id", "s3_service_id", bucketName, rule) + assert.NotNil(t, res) + assert.Equal(t, false, res.Status) +} + +func TestDeleteBucketLifecycle_ReturnFalseWhenErrorUnmarshalJson(t *testing.T) { + mockResponse := `{ + "status": false,,,,@#$@#$234 + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/delete-bucket-lifecycle-configuration-wrong-endpoint": mockResponse, + }) + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + bucketName := "bucket_name" + rule := map[string]interface{}{ + "ID": "rule_id", + "Prefix": "prefix", + "Status": "Enabled", + "Expiration": map[string]interface{}{ + "Days": 30, + }, + } + res := service.DeleteBucketLifecycle("vpc_id", "s3_service_id", bucketName, rule) + assert.NotNil(t, res) + assert.Equal(t, false, res.Status) +} + +func 
TestCheckServiceEnable_ReturnServicesWhenSuccess(t *testing.T) { + mockResponse := `{ + "data": [ + { + "s3_service_name": "HN-02", + "s3_service_id": "s3_service_id", + "s3_platform": "ceph", + "default_user": "fake-default-user", + "migrate_quota": 3, + "sync_quota": 3, + "rgw_total_nodes": 4, + "rgw_user_active_nodes": 2 + } + ], + "total": 1 + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/check-service-enabled?check_unlimited=undefined": mockResponse, + }) + + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + services := service.CheckServiceEnable(vpcId) + assert.NotNil(t, services) + assert.Equal(t, 1, services.Total) + assert.Equal(t, "HN-02", services.Data[0].S3ServiceName) + assert.Equal(t, "s3_service_id", services.Data[0].S3ServiceID) + assert.Equal(t, "ceph", services.Data[0].S3Platform) +} + +func TestCheckServiceEnable_ReturnFalseWhenError(t *testing.T) { + mockResponse := `{ + "total": 0 + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/check-service-enabled?check_unlimited=wrong-param": mockResponse, + }) + + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + services := service.CheckServiceEnable(vpcId) + assert.NotNil(t, services) + assert.Equal(t, 0, services.Total) +} + +func TestCheckServiceEnable_ReturnFalseWhenErrorUnmarshal(t *testing.T) { + mockResponse := `{ + "total": #$%#$%#$%#$%#$%!@#!23, + }` + mockClient, server, _ := common.NewClientForTesting(map[string]string{ + "/v1/vmware/vpc/vpc_id/s3/check-service-enabled?check_unlimited=wrong-param": mockResponse, + }) + + defer server.Close() + service := fptcloud_object_storage.NewObjectStorageService(mockClient) + vpcId := "vpc_id" + services := service.CheckServiceEnable(vpcId) + assert.NotNil(t, services) + assert.Equal(t, 0, services.Total) +} diff 
--git a/fptcloud/object-storage/resource_access_key.go b/fptcloud/object-storage/resource_access_key.go index 8914685..8af42ec 100644 --- a/fptcloud/object-storage/resource_access_key.go +++ b/fptcloud/object-storage/resource_access_key.go @@ -148,17 +148,15 @@ func resourceAccessKeyDelete(ctx context.Context, d *schema.ResourceData, m inte return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) } - log.Printf("[DEBUG] Found S3 service ID: %s", s3ServiceDetail.S3ServiceId) - if accessKeyId == "" { log.Printf("[ERROR] access_key_id is empty") return diag.Errorf("access_key_id is required for deletion") } - err := service.DeleteAccessKey(vpcId, s3ServiceDetail.S3ServiceId, accessKeyId) - if err != nil { - log.Printf("[ERROR] Failed to delete access key %s: %v", accessKeyId, err) - return diag.FromErr(err) + data := service.DeleteAccessKey(vpcId, s3ServiceDetail.S3ServiceId, accessKeyId) + if !data.Status { + log.Printf("[ERROR] Failed to delete access key %s: %v", accessKeyId, data.Message) + return diag.Errorf("failed to delete access key %s: %s", accessKeyId, data.Message) } if err := d.Set("status", true); err != nil { d.SetId("") From bfa1e653e3519ce4c1e81dfb118f9f52af24fb3c Mon Sep 17 00:00:00 2001 From: hoanglm Date: Thu, 28 Nov 2024 01:31:35 +0700 Subject: [PATCH 8/8] [Object Storage] fix: fix linting, remove duplicate code --- .../dataqsource_object_storage_bucket_acl.go | 21 +-- .../datasource_object_storage_access_key.go | 2 +- .../datasource_object_storage_bucket.go | 2 +- .../datasource_object_storage_bucket_cors.go | 2 +- ...asource_object_storage_bucket_lifecycle.go | 71 +++++---- ...datasource_object_storage_bucket_policy.go | 2 +- ...ce_object_storage_bucket_static_website.go | 2 +- ...source_object_storage_bucket_versioning.go | 2 +- .../datasource_object_storage_sub_user.go | 2 +- ...tasource_object_storage_sub_user_detail.go | 12 +- .../object-storage/object_storage_service.go | 4 + .../object_storage_service_test.go | 150 
+++++++++--------- .../object-storage/resource_access_key.go | 8 +- fptcloud/object-storage/resource_bucket.go | 6 +- .../object-storage/resource_bucket_acl.go | 4 +- .../object-storage/resource_bucket_cors.go | 50 +++--- .../resource_bucket_lifecycle.go | 65 ++++---- .../object-storage/resource_bucket_policy.go | 26 +-- .../resource_bucket_static_website.go | 4 +- .../resource_bucket_versioning.go | 2 +- fptcloud/object-storage/resource_sub_user.go | 4 +- .../object-storage/resource_sub_user_keys.go | 28 ++-- main.go | 1 - 23 files changed, 241 insertions(+), 229 deletions(-) diff --git a/fptcloud/object-storage/dataqsource_object_storage_bucket_acl.go b/fptcloud/object-storage/dataqsource_object_storage_bucket_acl.go index 49ea7b2..0237cd7 100644 --- a/fptcloud/object-storage/dataqsource_object_storage_bucket_acl.go +++ b/fptcloud/object-storage/dataqsource_object_storage_bucket_acl.go @@ -13,24 +13,19 @@ func DataSourceBucketAcl() *schema.Resource { return &schema.Resource{ ReadContext: dataSourceBucketAclRead, Schema: map[string]*schema.Schema{ - "vpc_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The VPC ID", - }, "bucket_name": { Type: schema.TypeString, Required: true, ForceNew: true, Description: "Name of the bucket to config the ACL", }, - "region_name": { + "vpc_id": { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + Description: "The VPC ID", }, + "canned_acl": { Type: schema.TypeString, Computed: true, @@ -41,6 +36,12 @@ func DataSourceBucketAcl() *schema.Resource { Computed: true, Description: "The status after configuring the bucket ACL", }, + "region_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, "bucket_acl": { Type: schema.TypeList, Computed: true, @@ -104,12 +105,12 @@ func DataSourceBucketAcl() *schema.Resource { func dataSourceBucketAclRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { client := m.(*common.Client) service := NewObjectStorageService(client) - vpcId := d.Get("vpc_id").(string) bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } r := service.GetBucketAcl(vpcId, s3ServiceDetail.S3ServiceId, bucketName) if !r.Status { diff --git a/fptcloud/object-storage/datasource_object_storage_access_key.go b/fptcloud/object-storage/datasource_object_storage_access_key.go index 9296b70..7b8e49f 100644 --- a/fptcloud/object-storage/datasource_object_storage_access_key.go +++ b/fptcloud/object-storage/datasource_object_storage_access_key.go @@ -50,7 +50,7 @@ func dataSourceAccessKeyRead(ctx context.Context, d *schema.ResourceData, m inte regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } keys, err := service.ListAccessKeys(vpcId, s3ServiceDetail.S3ServiceId) if err != nil { diff --git a/fptcloud/object-storage/datasource_object_storage_bucket.go b/fptcloud/object-storage/datasource_object_storage_bucket.go index f09fc39..d48e3c4 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket.go @@ -86,7 +86,7 @@ func dataSourceBucketRead(ctx 
context.Context, d *schema.ResourceData, m interfa regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } buckets := service.ListBuckets(vpcId, s3ServiceDetail.S3ServiceId, page, pageSize) if buckets.Total == 0 { diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_cors.go b/fptcloud/object-storage/datasource_object_storage_bucket_cors.go index 4a3f907..b15ac6c 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket_cors.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_cors.go @@ -94,7 +94,7 @@ func dataSourceBucketCorsRead(ctx context.Context, d *schema.ResourceData, m int vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } bucketName := d.Get("bucket_name").(string) page := 1 diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go b/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go index 4fcee2f..8f57915 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_lifecycle.go @@ -113,41 +113,9 @@ func DataSourceBucketLifecycle() *schema.Resource { }, } } - -func dataSourceBucketLifecycleRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { - client := m.(*common.Client) - service := NewObjectStorageService(client) - - bucketName := d.Get("bucket_name").(string) - vpcId := d.Get("vpc_id").(string) - regionName := d.Get("region_name").(string) - 
s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) - if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) - } - page := 1 - v, ok := d.GetOk("page") - if ok { - page = v.(int) - } - pageSize := 25 - v, ok = d.GetOk("page_size") - if ok { - pageSize = v.(int) - } - - lifeCycleResponse := service.GetBucketLifecycle(vpcId, s3ServiceDetail.S3ServiceId, bucketName, page, pageSize) - if !lifeCycleResponse.Status { - return diag.FromErr(fmt.Errorf("failed to fetch life cycle rules for bucket %s", bucketName)) - } - d.SetId(bucketName) +func parseData(lifeCycleResponse BucketLifecycleResponse) []interface{} { var formattedData []interface{} - if lifeCycleResponse.Total == 0 { - if err := d.Set("life_cycle_rules", make([]interface{}, 0)); err != nil { - d.SetId("") - return diag.FromErr(err) - } - } + for _, lifecycleRule := range lifeCycleResponse.Rules { data := map[string]interface{}{ "id": lifecycleRule.ID, @@ -191,7 +159,42 @@ func dataSourceBucketLifecycleRead(ctx context.Context, d *schema.ResourceData, } formattedData = append(formattedData, data) } + return formattedData +} +func dataSourceBucketLifecycleRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { + client := m.(*common.Client) + service := NewObjectStorageService(client) + bucketName := d.Get("bucket_name").(string) + vpcId := d.Get("vpc_id").(string) + regionName := d.Get("region_name").(string) + s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) + if s3ServiceDetail.S3ServiceId == "" { + return diag.FromErr(fmt.Errorf(regionError, regionName)) + } + page := 1 + v, ok := d.GetOk("page") + if ok { + page = v.(int) + } + pageSize := 25 + v, ok = d.GetOk("page_size") + if ok { + pageSize = v.(int) + } + + lifeCycleResponse := service.GetBucketLifecycle(vpcId, s3ServiceDetail.S3ServiceId, bucketName, page, pageSize) + if !lifeCycleResponse.Status { + return 
diag.FromErr(fmt.Errorf("failed to fetch life cycle rules for bucket %s", bucketName)) + } + if lifeCycleResponse.Total == 0 { + if err := d.Set("life_cycle_rules", make([]interface{}, 0)); err != nil { + d.SetId("") + return diag.FromErr(err) + } + } + d.SetId(bucketName) + formattedData := parseData(lifeCycleResponse) if err := d.Set("life_cycle_rules", formattedData); err != nil { d.SetId("") return diag.FromErr(err) diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_policy.go b/fptcloud/object-storage/datasource_object_storage_bucket_policy.go index b7d158b..7f73ecc 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket_policy.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_policy.go @@ -45,7 +45,7 @@ func dataSourceBucketPolicyRead(ctx context.Context, d *schema.ResourceData, m i vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } policyResponse := service.GetBucketPolicy(vpcId, s3ServiceDetail.S3ServiceId, bucketName) if !policyResponse.Status { diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_static_website.go b/fptcloud/object-storage/datasource_object_storage_bucket_static_website.go index 882334a..39d64de 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket_static_website.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_static_website.go @@ -48,7 +48,7 @@ func dataSourceBucketStaticWebsite(ctx context.Context, d *schema.ResourceData, vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", 
d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } staticWebsiteResponse := service.GetBucketWebsite(vpcId, s3ServiceDetail.S3ServiceId, bucketName) diff --git a/fptcloud/object-storage/datasource_object_storage_bucket_versioning.go b/fptcloud/object-storage/datasource_object_storage_bucket_versioning.go index e9eb8f7..28250f9 100644 --- a/fptcloud/object-storage/datasource_object_storage_bucket_versioning.go +++ b/fptcloud/object-storage/datasource_object_storage_bucket_versioning.go @@ -47,7 +47,7 @@ func dataSourceBucketVersioningRead(ctx context.Context, d *schema.ResourceData, vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, d.Get("region_name").(string)) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } bucketName := d.Get("bucket_name").(string) diff --git a/fptcloud/object-storage/datasource_object_storage_sub_user.go b/fptcloud/object-storage/datasource_object_storage_sub_user.go index ad32393..035789a 100644 --- a/fptcloud/object-storage/datasource_object_storage_sub_user.go +++ b/fptcloud/object-storage/datasource_object_storage_sub_user.go @@ -70,7 +70,7 @@ func dataSourceSubUserRead(ctx context.Context, d *schema.ResourceData, m interf regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } page := 1 pageSize := 100 diff --git a/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go b/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go index 8befde7..546bf8c 100644 --- 
a/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go +++ b/fptcloud/object-storage/datasource_object_storage_sub_user_detail.go @@ -58,20 +58,22 @@ func DataSourceSubUserDetail() *schema.Resource { func dataSourceSubUserDetailRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { client := m.(*common.Client) objectStorageService := NewObjectStorageService(client) - vpcId := d.Get("vpc_id").(string) - s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) + regionName := d.Get("region_name").(string) + + s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } - subUserId := d.Get("user_id").(string) + subUserId := d.Get("user_id").(string) subUser := objectStorageService.DetailSubUser(vpcId, s3ServiceDetail.S3ServiceId, subUserId) + d.SetId(subUser.UserID) if subUser.UserID == "" { + d.SetId("") return diag.Errorf("sub-user with ID %s not found", subUserId) } - d.SetId(subUser.UserID) if err := d.Set("user_id", subUser.UserID); err != nil { return diag.FromErr(err) } diff --git a/fptcloud/object-storage/object_storage_service.go b/fptcloud/object-storage/object_storage_service.go index 984d0c9..966c200 100644 --- a/fptcloud/object-storage/object_storage_service.go +++ b/fptcloud/object-storage/object_storage_service.go @@ -6,6 +6,10 @@ import ( common "terraform-provider-fptcloud/commons" ) +const ( + regionError = "region %s is not enabled" +) + // ObjectStorageService defines the interface for object storage operations type ObjectStorageService interface { CheckServiceEnable(vpcId string) S3ServiceEnableResponse diff --git a/fptcloud/object-storage/object_storage_service_test.go b/fptcloud/object-storage/object_storage_service_test.go index 
8faf773..55e0bd7 100644 --- a/fptcloud/object-storage/object_storage_service_test.go +++ b/fptcloud/object-storage/object_storage_service_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/assert" ) -func TestCreateResourceAccessKey_ReturnsResourceAccessKeyIDWhenSuccess(t *testing.T) { +func TestCreateResourceAccessKeyReturnsResourceAccessKeyIDWhenSuccess(t *testing.T) { mockResponse := `{ "status": true, "message": "Create resource access key successfully", @@ -34,7 +34,7 @@ func TestCreateResourceAccessKey_ReturnsResourceAccessKeyIDWhenSuccess(t *testin assert.Equal(t, "Create resource access key successfully", resourceAccessKeyID.Message) } -func TestCreateResourceAccessKey_ReturnsErrorWhenFailed(t *testing.T) { +func TestCreateResourceAccessKeyReturnsErrorWhenFailed(t *testing.T) { mockResponse := `{ "status": false, "message": "Failed to create resource access key", @@ -56,7 +56,7 @@ func TestCreateResourceAccessKey_ReturnsErrorWhenFailed(t *testing.T) { assert.Equal(t, "Failed to create resource access key", resourceAccessKeyID.Message) } -func TestDeleteResouurceAccessKey_ReturnOkWhenSuccess(t *testing.T) { +func TestDeleteResouurceAccessKeyReturnOkWhenSuccess(t *testing.T) { mockResponse := `{ "status": true, "message": "Delete resource access key successfully" @@ -75,7 +75,7 @@ func TestDeleteResouurceAccessKey_ReturnOkWhenSuccess(t *testing.T) { assert.Equal(t, "Access key deleted successfully", res.Message) } -func TestListAccessKeys_ReturnAccessKeysWhenSuccess(t *testing.T) { +func TestListAccessKeysReturnAccessKeysWhenSuccess(t *testing.T) { mockResponse := `{ "credentials": [ { @@ -103,7 +103,7 @@ func TestListAccessKeys_ReturnAccessKeysWhenSuccess(t *testing.T) { assert.Equal(t, true, accessKeys.Credentials[0].Credentials[0].Active) } -func TestCreateBucket_ReturnsBucketIDWhenSuccess(t *testing.T) { +func TestCreateBucketReturnsBucketIDWhenSuccess(t *testing.T) { mockResponse := `{ "status": true, "message": "Create bucket successfully" @@ 
-124,7 +124,7 @@ func TestCreateBucket_ReturnsBucketIDWhenSuccess(t *testing.T) { assert.Equal(t, true, r.Status) } -func TestCreateBucket_ReturnsErrorWhenFailed(t *testing.T) { +func TestCreateBucketReturnsErrorWhenFailed(t *testing.T) { mockResponse := `{ "status": false, "message": "Failed to create bucket", @@ -145,7 +145,7 @@ func TestCreateBucket_ReturnsErrorWhenFailed(t *testing.T) { assert.Equal(t, false, r.Status) } -func TestListBuckets_ReturnsBucketsWhenSuccess(t *testing.T) { +func TestListBucketsReturnsBucketsWhenSuccess(t *testing.T) { mockResponse := `{ "buckets": [ { @@ -176,7 +176,7 @@ func TestListBuckets_ReturnsBucketsWhenSuccess(t *testing.T) { assert.Equal(t, "https://xxxx-xxx.xyz.com", buckets.Buckets[0].Endpoint) } -func TestListBuckets_ReturnsErrorWhenFailed(t *testing.T) { +func TestListBucketsReturnsErrorWhenFailed(t *testing.T) { mockResponse := `{ "buckets": [], "total": 0 @@ -193,7 +193,7 @@ func TestListBuckets_ReturnsErrorWhenFailed(t *testing.T) { assert.Equal(t, 0, buckets.Total) } -func TestDeleteBucket_ReturnsOkWhenSuccess(t *testing.T) { +func TestDeleteBucketReturnsOkWhenSuccess(t *testing.T) { mockResponse := `{ "status": true }` @@ -210,7 +210,7 @@ func TestDeleteBucket_ReturnsOkWhenSuccess(t *testing.T) { assert.Equal(t, true, res.Status) } -func TestCreateSubUser_ReturnsTrueWhenSuccess(t *testing.T) { +func TestCreateSubUserReturnsTrueWhenSuccess(t *testing.T) { mockResponse := `{ "status": true, "message": "Sub-user created successfully" @@ -232,7 +232,7 @@ func TestCreateSubUser_ReturnsTrueWhenSuccess(t *testing.T) { assert.Equal(t, "Sub-user created successfully", r.Message) } -func TestCreateSubUser_ReturnsFalseWhenFailed(t *testing.T) { +func TestCreateSubUserReturnsFalseWhenFailed(t *testing.T) { mockResponse := `{ "status": false }` @@ -252,7 +252,7 @@ func TestCreateSubUser_ReturnsFalseWhenFailed(t *testing.T) { assert.Equal(t, false, r.Status) } -func TestDeleteSubUser_ReturnOkWhenSuccess(t *testing.T) { +func 
TestDeleteSubUserReturnOkWhenSuccess(t *testing.T) { mockResponse := `{}` mockClient, server, _ := common.NewClientForTesting(map[string]string{ "/v1/vmware/vpc/vpc_id/s3/s3_service_id/sub-users/sub_user_id/delete": mockResponse, @@ -266,7 +266,7 @@ func TestDeleteSubUser_ReturnOkWhenSuccess(t *testing.T) { assert.Nil(t, err) } -func TestListSubUsers_ReturnsSubUsersWhenSuccess(t *testing.T) { +func TestListSubUsersReturnsSubUsersWhenSuccess(t *testing.T) { mockResponse := `{ "sub_users": [ { @@ -295,7 +295,7 @@ func TestListSubUsers_ReturnsSubUsersWhenSuccess(t *testing.T) { assert.Equal(t, "SubUserReadWrite", subUsers.SubUsers[0].Role) } -func TestListSubUsers_ReturnsErrorWhenFailed(t *testing.T) { +func TestListSubUsersReturnsErrorWhenFailed(t *testing.T) { mockResponse := `{ "sub_users": [], "total": 0, @@ -313,7 +313,7 @@ func TestListSubUsers_ReturnsErrorWhenFailed(t *testing.T) { assert.Equal(t, 0, subUsers.Total) } -func TestGetDetailSubUser_ReturnOkWhenSuccess(t *testing.T) { +func TestGetDetailSubUserReturnOkWhenSuccess(t *testing.T) { mockResponse := ` { "user_id": "sgn-replicate123123", @@ -337,7 +337,7 @@ func TestGetDetailSubUser_ReturnOkWhenSuccess(t *testing.T) { assert.Equal(t, "SubUserReadWrite", subUser.Role) } -func TestCreateSubUserAccessKey_ReturnsAccessKeyWhenSuccess(t *testing.T) { +func TestCreateSubUserAccessKeyReturnsAccessKeyWhenSuccess(t *testing.T) { mockResponse := `{ "status": true, "credential": { @@ -360,7 +360,7 @@ func TestCreateSubUserAccessKey_ReturnsAccessKeyWhenSuccess(t *testing.T) { assert.Equal(t, true, accessKey.Status) } -func TestCreateSubUserAccessKey_ReturnsErrorWhenFailed(t *testing.T) { +func TestCreateSubUserAccessKeyReturnsErrorWhenFailed(t *testing.T) { mockResponse := `{ "status": false, }` @@ -379,7 +379,7 @@ func TestCreateSubUserAccessKey_ReturnsErrorWhenFailed(t *testing.T) { assert.Equal(t, false, accessKey.Status) } -func TestDeleteSubUserAccessKey_ReturnOkWhenSuccess(t *testing.T) { +func 
TestDeleteSubUserAccessKeyReturnOkWhenSuccess(t *testing.T) { mockResponse := `{ "status": true }` @@ -397,7 +397,7 @@ func TestDeleteSubUserAccessKey_ReturnOkWhenSuccess(t *testing.T) { assert.Equal(t, true, res.Status) } -func TestPutBucketPolicy_ReturnOkWhenSuccess(t *testing.T) { +func TestPutBucketPolicyReturnOkWhenSuccess(t *testing.T) { mockResponse := `{ "status": true }` @@ -416,7 +416,7 @@ func TestPutBucketPolicy_ReturnOkWhenSuccess(t *testing.T) { assert.Equal(t, true, res.Status) } -func TestGetBucketPolicy_ReturnsPolicyWhenSuccess(t *testing.T) { +func TestGetBucketPolicyReturnsPolicyWhenSuccess(t *testing.T) { mockResponse := `{ "policy": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"AllowAllS3Actions\",\"Effect\":\"Allow\",\"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::bucket_name/*\"}]}", "status": true @@ -435,7 +435,7 @@ func TestGetBucketPolicy_ReturnsPolicyWhenSuccess(t *testing.T) { assert.Equal(t, true, policy.Status) } -func TestGetBucketPolicy_ReturnsFalseWhenFailed(t *testing.T) { +func TestGetBucketPolicyReturnsFalseWhenFailed(t *testing.T) { mockResponse := `{ "policy": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"AllowAllS3Actions\",\"Effect\":\"Allow\",\"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":\"arn:aws:s3:::bucket_name/*\"}]}", "status": false, @@ -453,7 +453,7 @@ func TestGetBucketPolicy_ReturnsFalseWhenFailed(t *testing.T) { assert.Equal(t, false, policy.Status) } -func TestCreateBucketCors_ReturnOkWhenSuccess(t *testing.T) { +func TestCreateBucketCorsReturnOkWhenSuccess(t *testing.T) { mockResponse := `{ "status": true }` @@ -471,7 +471,7 @@ func TestCreateBucketCors_ReturnOkWhenSuccess(t *testing.T) { assert.Equal(t, true, res.Status) } -func TestUpdateBucketCors_ReturnOkWhenSuccess(t *testing.T) { +func TestUpdateBucketCorsReturnOkWhenSuccess(t *testing.T) { mockResponse := `{ "status": true }` @@ -490,7 +490,7 @@ func TestUpdateBucketCors_ReturnOkWhenSuccess(t *testing.T) { 
assert.Equal(t, true, res.Status) } -func TestGetBucketCors_ReturnCorsWhenSuccess(t *testing.T) { +func TestGetBucketCorsReturnCorsWhenSuccess(t *testing.T) { mockResponse := `{ "cors_rules": [ { @@ -515,7 +515,7 @@ func TestGetBucketCors_ReturnCorsWhenSuccess(t *testing.T) { assert.Equal(t, "*", cors.CorsRules[0].AllowedHeaders[0]) } -func TestGetBucketCors_ReturnFalseWhenFailed(t *testing.T) { +func TestGetBucketCorsReturnFalseWhenFailed(t *testing.T) { mockResponse := `{ "cors_rules": [], "status": false, @@ -532,7 +532,7 @@ func TestGetBucketCors_ReturnFalseWhenFailed(t *testing.T) { assert.NotNil(t, err) } -func TestPutBucketVersioning_ReturnNilWhenSuccess(t *testing.T) { +func TestPutBucketVersioningReturnNilWhenSuccess(t *testing.T) { mockResponse := `{ "status": true }` @@ -549,7 +549,7 @@ func TestPutBucketVersioning_ReturnNilWhenSuccess(t *testing.T) { assert.Nil(t, res) } -func TestGetBucketVersioning_ReturnBucketVersioning(t *testing.T) { +func TestGetBucketVersioningReturnBucketVersioning(t *testing.T) { mockResponse := `{ "status": true, "config": "Enabled" @@ -565,7 +565,7 @@ func TestGetBucketVersioning_ReturnBucketVersioning(t *testing.T) { assert.Equal(t, true, versioning.Status) } -func TestPutBucketAcl_ReturnAclWhenSuccess(t *testing.T) { +func TestPutBucketAclReturnAclWhenSuccess(t *testing.T) { mockResponse := `{ "status": true, "taskId": "task_id" @@ -586,7 +586,7 @@ func TestPutBucketAcl_ReturnAclWhenSuccess(t *testing.T) { assert.Equal(t, "task_id", res.TaskID) } -func TestGetBucketAcl_ReturnAclWhenSuccess(t *testing.T) { +func TestGetBucketAclReturnAclWhenSuccess(t *testing.T) { mockResponse := `{ "status": true, "Owner": { @@ -622,7 +622,7 @@ func TestGetBucketAcl_ReturnAclWhenSuccess(t *testing.T) { assert.Equal(t, "FULL_CONTROL", acl.Grants[0].Permission) } -func TestGetBucketAcl_ReturnFalseWhenFailed(t *testing.T) { +func TestGetBucketAclReturnFalseWhenFailed(t *testing.T) { mockResponse := `{ "status": false }` @@ -637,7 +637,7 @@ 
func TestGetBucketAcl_ReturnFalseWhenFailed(t *testing.T) { assert.Equal(t, false, acl.Status) } -func TestGetBucketAcl_ReturnFalseWhenFailedUnmarshalJson(t *testing.T) { +func TestGetBucketAclReturnFalseWhenFailedUnmarshalJson(t *testing.T) { mockResponse := `{ "status": false,,, }` @@ -652,7 +652,7 @@ func TestGetBucketAcl_ReturnFalseWhenFailedUnmarshalJson(t *testing.T) { assert.Equal(t, false, acl.Status) } -func TestPutBucketWebsite_ReturnOkWhenSuccess(t *testing.T) { +func TestPutBucketWebsiteReturnOkWhenSuccess(t *testing.T) { mockResponse := `{ "status": true, }` @@ -672,7 +672,7 @@ func TestPutBucketWebsite_ReturnOkWhenSuccess(t *testing.T) { assert.Equal(t, true, res.Status) } -func TestPutBucketWebsite_ReturnOFalseWhenFailed(t *testing.T) { +func TestPutBucketWebsiteReturnOFalseWhenFailed(t *testing.T) { mockResponse := `{ "status": false, }` @@ -683,7 +683,7 @@ func TestPutBucketWebsite_ReturnOFalseWhenFailed(t *testing.T) { service := fptcloud_object_storage.NewObjectStorageService(mockClient) bucketName := "bucket_name" website := fptcloud_object_storage.BucketWebsiteRequest{ - Key: "index.html", + Key: "example.html", Suffix: "index2.html", Bucket: "bucket_name", } @@ -692,7 +692,7 @@ func TestPutBucketWebsite_ReturnOFalseWhenFailed(t *testing.T) { assert.Equal(t, true, res.Status) } -func TestDeleteBucketStaticWebsite_ReturnTrueWhenSuccess(t *testing.T) { +func TestDeleteBucketStaticWebsiteReturnTrueWhenSuccess(t *testing.T) { mockResponse := `{ "status": true }` @@ -707,7 +707,7 @@ func TestDeleteBucketStaticWebsite_ReturnTrueWhenSuccess(t *testing.T) { assert.Equal(t, true, res.Status) } -func TestDeleteBucketStaticWebsite_ReturnFalseWhenError(t *testing.T) { +func TestDeleteBucketStaticWebsiteReturnFalseWhenError(t *testing.T) { mockResponse := `{ "status": false }` @@ -722,7 +722,7 @@ func TestDeleteBucketStaticWebsite_ReturnFalseWhenError(t *testing.T) { assert.Equal(t, true, res.Status) } -func TestGetBucketWebsite_ReturnWebsiteWhenSuccess(t 
*testing.T) { +func TestGetBucketWebsiteReturnWebsiteWhenSuccess(t *testing.T) { mockResponse := `{ "status": true, "config": { @@ -741,7 +741,7 @@ func TestGetBucketWebsite_ReturnWebsiteWhenSuccess(t *testing.T) { "RetryAttempts": 0 }, "IndexDocument": { - "Suffix": "index.html" + "Suffix": "some_index.html" }, "ErrorDocument": { "Key": "error.html" @@ -757,11 +757,11 @@ func TestGetBucketWebsite_ReturnWebsiteWhenSuccess(t *testing.T) { website := service.GetBucketWebsite("vpc_id", "s3_service_id", bucketName) assert.NotNil(t, website) assert.Equal(t, true, website.Status) - assert.Equal(t, "index.html", website.Config.IndexDocument.Suffix) + assert.Equal(t, "some_index.html", website.Config.IndexDocument.Suffix) assert.Equal(t, "error.html", website.Config.ErrorDocument.Key) } -func TestGetBucketWebsite_ReturnFalseWhenError(t *testing.T) { +func TestGetBucketWebsiteReturnFalseWhenError(t *testing.T) { mockResponse := `{ "status": false }` @@ -776,7 +776,7 @@ func TestGetBucketWebsite_ReturnFalseWhenError(t *testing.T) { assert.Equal(t, false, website.Status) } -func TestGetBucketLifecycle_ReturnRuleWhenSuccess(t *testing.T) { +func TestGetBucketLifecycleReturnRuleWhenSuccess(t *testing.T) { mockResponse := `{ "status": true, "rules": [ @@ -807,7 +807,7 @@ func TestGetBucketLifecycle_ReturnRuleWhenSuccess(t *testing.T) { assert.Equal(t, 1, lifecycle.Total) } -func TestGetBucketLifecycle_ReturnFalseWhenFailed(t *testing.T) { +func TestGetBucketLifecycleReturnFalseWhenFailed(t *testing.T) { mockResponse := `{ "status": false, "rules": [], @@ -825,22 +825,22 @@ func TestGetBucketLifecycle_ReturnFalseWhenFailed(t *testing.T) { assert.Equal(t, 0, lifecycle.Total) } -func TestPutBucketLifecycle_ReturnOkWhenSuccess(t *testing.T) { +func TestPutBucketLifecycleReturnOkWhenSuccess(t *testing.T) { mockResponse := `{ "status": true }` mockClient, server, _ := common.NewClientForTesting(map[string]string{ - 
"/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/create-bucket-lifecycle-configuration": mockResponse, + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name22/create-bucket-lifecycle-configuration": mockResponse, }) defer server.Close() service := fptcloud_object_storage.NewObjectStorageService(mockClient) - bucketName := "bucket_name" + bucketName := "bucket_name22" rule := map[string]interface{}{ "ID": "rule_id", - "Prefix": "prefix", - "Status": "Enabled", + "Prefix": "prefix2222222", + "Status": "Disabled", "Expiration": map[string]interface{}{ - "Days": 30, + "Days": 8, }, } res := service.PutBucketLifecycle("vpc_id", "s3_service_id", bucketName, rule) @@ -848,30 +848,30 @@ func TestPutBucketLifecycle_ReturnOkWhenSuccess(t *testing.T) { assert.Equal(t, true, res.Status) } -func TestPutBucketLifecycle_ReturnFalseWhenError(t *testing.T) { +func TestPutBucketLifecycleReturnFalseWhenError(t *testing.T) { mockResponse := `{ "status": false }` mockClient, server, _ := common.NewClientForTesting(map[string]string{ - "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name/create-bucket-lifecycle-configuration-wrong-endpoint": mockResponse, + "/v1/vmware/vpc/vpc_id/s3/s3_service_id/bucket/bucket_name1111/create-bucket-lifecycle-configuration-wrong-endpoint": mockResponse, }) defer server.Close() service := fptcloud_object_storage.NewObjectStorageService(mockClient) - bucketName := "bucket_name" + bucketName := "bucket_name1111" rule := map[string]interface{}{ - "ID": "rule_id", - "Prefix": "prefix", - "Status": "Enabled", + "ID": "rule_id", "Expiration": map[string]interface{}{ - "Days": 30, + "Days": 90, }, + "Prefix": "filer", + "Status": "Enabled", } res := service.PutBucketLifecycle("vpc_id", "s3_service_id", bucketName, rule) assert.NotNil(t, res) assert.Equal(t, false, res.Status) } -func TestPutBucketLifecycle_ReturnFalseWhenErrorUnmarshalJson(t *testing.T) { +func TestPutBucketLifecycleReturnFalseWhenErrorUnmarshalJson(t *testing.T) { 
mockResponse := `{ "status": false,,,,@#$@#$234 }` @@ -882,19 +882,19 @@ func TestPutBucketLifecycle_ReturnFalseWhenErrorUnmarshalJson(t *testing.T) { service := fptcloud_object_storage.NewObjectStorageService(mockClient) bucketName := "bucket_name" rule := map[string]interface{}{ - "ID": "rule_id", - "Prefix": "prefix", "Status": "Enabled", + "ID": "rule_id", "Expiration": map[string]interface{}{ "Days": 30, }, + "Prefix": "prefix", } res := service.PutBucketLifecycle("vpc_id", "s3_service_id", bucketName, rule) assert.NotNil(t, res) assert.Equal(t, false, res.Status) } -func TestDeleteBucketLifecycle_ReturnOkWhenSuccess(t *testing.T) { +func TestDeleteBucketLifecycleReturnOkWhenSuccess(t *testing.T) { mockResponse := `{ "status": true }` @@ -905,19 +905,19 @@ func TestDeleteBucketLifecycle_ReturnOkWhenSuccess(t *testing.T) { service := fptcloud_object_storage.NewObjectStorageService(mockClient) bucketName := "bucket_name" rule := map[string]interface{}{ - "ID": "rule_id", - "Prefix": "prefix", - "Status": "Enabled", + "ID": "rule_id", "Expiration": map[string]interface{}{ - "Days": 30, + "Days": 12, }, + "Prefix": "prefix", + "Status": "Enabled", } res := service.DeleteBucketLifecycle("vpc_id", "s3_service_id", bucketName, rule) assert.NotNil(t, res) assert.Equal(t, true, res.Status) } -func TestDeleteBucketLifecycle_ReturnFalseWhenError(t *testing.T) { +func TestDeleteBucketLifecycleReturnFalseWhenError(t *testing.T) { mockResponse := `{ "status": false }` @@ -928,11 +928,13 @@ func TestDeleteBucketLifecycle_ReturnFalseWhenError(t *testing.T) { service := fptcloud_object_storage.NewObjectStorageService(mockClient) bucketName := "bucket_name" rule := map[string]interface{}{ - "ID": "rule_id", - "Prefix": "prefix", - "Status": "Enabled", + "ID": "rule_id", + "Prefix": map[string]interface{}{ + "Filter": "filter", + }, + "Status": "Disabled", "Expiration": map[string]interface{}{ - "Days": 30, + "Days": 12, }, } res := service.DeleteBucketLifecycle("vpc_id", 
"s3_service_id", bucketName, rule) @@ -940,7 +942,7 @@ func TestDeleteBucketLifecycle_ReturnFalseWhenError(t *testing.T) { assert.Equal(t, false, res.Status) } -func TestDeleteBucketLifecycle_ReturnFalseWhenErrorUnmarshalJson(t *testing.T) { +func TestDeleteBucketLifecycleReturnFalseWhenErrorUnmarshalJson(t *testing.T) { mockResponse := `{ "status": false,,,,@#$@#$234 }` @@ -951,19 +953,19 @@ func TestDeleteBucketLifecycle_ReturnFalseWhenErrorUnmarshalJson(t *testing.T) { service := fptcloud_object_storage.NewObjectStorageService(mockClient) bucketName := "bucket_name" rule := map[string]interface{}{ - "ID": "rule_id", "Prefix": "prefix", - "Status": "Enabled", + "ID": "rule_id9999", "Expiration": map[string]interface{}{ "Days": 30, }, + "Status": "Disabled", } res := service.DeleteBucketLifecycle("vpc_id", "s3_service_id", bucketName, rule) assert.NotNil(t, res) assert.Equal(t, false, res.Status) } -func TestCheckServiceEnable_ReturnServicesWhenSuccess(t *testing.T) { +func TestCheckServiceEnableReturnServicesWhenSuccess(t *testing.T) { mockResponse := `{ "data": [ { @@ -994,7 +996,7 @@ func TestCheckServiceEnable_ReturnServicesWhenSuccess(t *testing.T) { assert.Equal(t, "ceph", services.Data[0].S3Platform) } -func TestCheckServiceEnable_ReturnFalseWhenError(t *testing.T) { +func TestCheckServiceEnableReturnFalseWhenError(t *testing.T) { mockResponse := `{ "total": 0 }` @@ -1010,7 +1012,7 @@ func TestCheckServiceEnable_ReturnFalseWhenError(t *testing.T) { assert.Equal(t, 0, services.Total) } -func TestCheckServiceEnable_ReturnFalseWhenErrorUnmarshal(t *testing.T) { +func TestCheckServiceEnableReturnFalseWhenErrorUnmarshal(t *testing.T) { mockResponse := `{ "total": #$%#$%#$%#$%#$%!@#!23, }` diff --git a/fptcloud/object-storage/resource_access_key.go b/fptcloud/object-storage/resource_access_key.go index 8af42ec..9970256 100644 --- a/fptcloud/object-storage/resource_access_key.go +++ b/fptcloud/object-storage/resource_access_key.go @@ -3,7 +3,6 @@ package 
fptcloud_object_storage import ( "context" "fmt" - "log" common "terraform-provider-fptcloud/commons" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -66,7 +65,7 @@ func resourceAccessKeyCreate(ctx context.Context, d *schema.ResourceData, m inte regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } resp := service.CreateAccessKey(vpcId, s3ServiceDetail.S3ServiceId) @@ -144,18 +143,15 @@ func resourceAccessKeyDelete(ctx context.Context, d *schema.ResourceData, m inte s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - log.Printf("[ERROR] Region %s is not enabled for VPC %s", regionName, vpcId) - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } if accessKeyId == "" { - log.Printf("[ERROR] access_key_id is empty") return diag.Errorf("access_key_id is required for deletion") } data := service.DeleteAccessKey(vpcId, s3ServiceDetail.S3ServiceId, accessKeyId) if !data.Status { - log.Printf("[ERROR] Failed to delete access key %s: %v", accessKeyId, data.Message) return diag.Errorf("failed to delete access key %s: %s", accessKeyId, data.Message) } if err := d.Set("status", true); err != nil { diff --git a/fptcloud/object-storage/resource_bucket.go b/fptcloud/object-storage/resource_bucket.go index 366abd8..d262235 100644 --- a/fptcloud/object-storage/resource_bucket.go +++ b/fptcloud/object-storage/resource_bucket.go @@ -94,7 +94,7 @@ func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, m interfa } s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s 
is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } bucket := objectStorageService.CreateBucket(req, vpcId, s3ServiceDetail.S3ServiceId) @@ -109,7 +109,7 @@ func resourceBucketRead(_ context.Context, d *schema.ResourceData, m interface{} vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } bucket := objectStorageService.ListBuckets(vpcId, s3ServiceDetail.S3ServiceId, 1, 99999) @@ -133,7 +133,7 @@ func resourceBucketDelete(ctx context.Context, d *schema.ResourceData, m interfa bucketName := d.Get("name").(string) s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } satus := objectStorageService.DeleteBucket(vpcId, s3ServiceDetail.S3ServiceId, bucketName) diff --git a/fptcloud/object-storage/resource_bucket_acl.go b/fptcloud/object-storage/resource_bucket_acl.go index 3d1c0e9..f569a4e 100644 --- a/fptcloud/object-storage/resource_bucket_acl.go +++ b/fptcloud/object-storage/resource_bucket_acl.go @@ -70,7 +70,7 @@ func resourceBucketAclCreate(ctx context.Context, d *schema.ResourceData, m inte } s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } var bucketAclRequest BucketAclRequest bucketAclRequest.CannedAcl = cannedAcl @@ -97,7 +97,7 @@ func 
resourceBucketAclRead(ctx context.Context, d *schema.ResourceData, m interf regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } r := service.GetBucketAcl(vpcId, s3ServiceDetail.S3ServiceId, bucketName) if !r.Status { diff --git a/fptcloud/object-storage/resource_bucket_cors.go b/fptcloud/object-storage/resource_bucket_cors.go index dca724d..27c198e 100644 --- a/fptcloud/object-storage/resource_bucket_cors.go +++ b/fptcloud/object-storage/resource_bucket_cors.go @@ -18,24 +18,6 @@ func ResourceBucketCors() *schema.Resource { DeleteContext: resourceBucketCorsDelete, ReadContext: resourceBucketCorsRead, Schema: map[string]*schema.Schema{ - "bucket_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "Name of the bucket", - }, - "vpc_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The VPC ID", - }, - "region_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", - }, "cors_config": { Type: schema.TypeString, Optional: true, @@ -68,6 +50,24 @@ func ResourceBucketCors() *schema.Resource { }, }, }, + "bucket_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Name of the bucket", + }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC ID", + }, + "region_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The region name that's are the same with the region name in the S3 service. 
Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + }, }, } } @@ -80,7 +80,7 @@ func resourceBucketCorsCreate(ctx context.Context, d *schema.ResourceData, m int regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } var corsConfigData string @@ -99,10 +99,10 @@ func resourceBucketCorsCreate(ctx context.Context, d *schema.ResourceData, m int return diag.FromErr(err) } payload := map[string]interface{}{ - "ID": jsonMap.ID, "AllowedMethods": jsonMap.AllowedMethods, - "AllowedOrigins": jsonMap.AllowedOrigins, "MaxAgeSeconds": jsonMap.MaxAgeSeconds, + "ID": jsonMap.ID, + "AllowedOrigins": jsonMap.AllowedOrigins, } if len(jsonMap.AllowedHeaders) > 0 { payload["AllowedHeaders"] = jsonMap.AllowedHeaders @@ -131,7 +131,7 @@ func resourceBucketCorsRead(_ context.Context, d *schema.ResourceData, m interfa regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } page := 1 pageSize := 999999 @@ -169,7 +169,7 @@ func resourceBucketCorsDelete(ctx context.Context, d *schema.ResourceData, m int regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } var corsConfigData string if v, ok := d.GetOk("cors_config"); ok { @@ -189,9 +189,9 @@ func resourceBucketCorsDelete(ctx context.Context, d *schema.ResourceData, m int var payload []map[string]interface{} for _, corsRule := range jsonMap { payload := 
map[string]interface{}{ - "ID": corsRule.ID, - "AllowedMethods": corsRule.AllowedMethods, "AllowedOrigins": corsRule.AllowedOrigins, + "AllowedMethods": corsRule.AllowedMethods, + "ID": corsRule.ID, "MaxAgeSeconds": corsRule.MaxAgeSeconds, } if len(corsRule.AllowedHeaders) > 0 { diff --git a/fptcloud/object-storage/resource_bucket_lifecycle.go b/fptcloud/object-storage/resource_bucket_lifecycle.go index 48b6600..bbdc9cb 100644 --- a/fptcloud/object-storage/resource_bucket_lifecycle.go +++ b/fptcloud/object-storage/resource_bucket_lifecycle.go @@ -24,7 +24,23 @@ func ResourceBucketLifeCycle() *schema.Resource { ForceNew: true, Description: "The VPC ID", }, - "bucket_name": { + "state": { + Type: schema.TypeBool, + Computed: true, + Description: "State after bucket lifecycle rule is created", + }, + "rules": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, "bucket_name": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -51,27 +67,17 @@ func ResourceBucketLifeCycle() *schema.Resource { Description: "Path to the JSON file containing the bucket lifecycle rule, support only one rule", ConflictsWith: []string{"life_cycle_rule"}, }, - "state": { - Type: schema.TypeBool, - Computed: true, - Description: "State after bucket lifecycle rule is created", - }, - "rules": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, }, } } - +func parseLifeCycleData(lifeCycleData string) (S3BucketLifecycleConfig, error) { + var jsonMap S3BucketLifecycleConfig + err := json.Unmarshal([]byte(lifeCycleData), &jsonMap) + if err != nil { + return S3BucketLifecycleConfig{}, err + } + return jsonMap, nil +} func resourceBucketLifeCycleCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { 
client := m.(*common.Client) service := NewObjectStorageService(client) @@ -91,18 +97,17 @@ func resourceBucketLifeCycleCreate(ctx context.Context, d *schema.ResourceData, } s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } - var jsonMap S3BucketLifecycleConfig - err := json.Unmarshal([]byte(lifecycleRuleContent), &jsonMap) + jsonMap, err := parseLifeCycleData(lifecycleRuleContent) if err != nil { return diag.FromErr(err) } payload := map[string]interface{}{ "ID": jsonMap.ID, - "Filter": map[string]interface{}{"Prefix": jsonMap.Filter.Prefix}, "NoncurrentVersionExpiration": map[string]interface{}{"NoncurrentDays": jsonMap.NoncurrentVersionExpiration.NoncurrentDays}, "AbortIncompleteMultipartUpload": map[string]interface{}{"DaysAfterInitiation": jsonMap.AbortIncompleteMultipartUpload.DaysAfterInitiation}, + "Filter": map[string]interface{}{"Prefix": jsonMap.Filter.Prefix}, } if jsonMap.Expiration.Days != 0 && jsonMap.Expiration.ExpiredObjectDeleteMarker { return diag.FromErr(fmt.Errorf("Expiration.Days and Expiration.ExpiredObjectDeleteMarker cannot be set at the same time")) @@ -137,7 +142,7 @@ func resourceBucketLifeCycleRead(_ context.Context, d *schema.ResourceData, m in regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } page := 1 pageSize := 999999 @@ -175,7 +180,7 @@ func resourceBucketLifeCycleDelete(ctx context.Context, d *schema.ResourceData, regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region 
%s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } var lifecycleRuleContent string if v, ok := d.GetOk("life_cycle_rule"); ok { @@ -187,18 +192,18 @@ func resourceBucketLifeCycleDelete(ctx context.Context, d *schema.ResourceData, } else { return diag.FromErr(fmt.Errorf("either 'life_cycle_rule' or 'life_cycle_rule_file' must be specified")) } - var jsonMap S3BucketLifecycleConfig - err := json.Unmarshal([]byte(lifecycleRuleContent), &jsonMap) + jsonMap, err := parseLifeCycleData(lifecycleRuleContent) if err != nil { return diag.FromErr(err) } + payload := map[string]interface{}{ - "ID": jsonMap.ID, - "Filter": map[string]interface{}{"Prefix": jsonMap.Filter.Prefix}, - "NoncurrentVersionExpiration": map[string]interface{}{"NoncurrentDays": jsonMap.NoncurrentVersionExpiration.NoncurrentDays}, "AbortIncompleteMultipartUpload": map[string]interface{}{"DaysAfterInitiation": jsonMap.AbortIncompleteMultipartUpload.DaysAfterInitiation}, "OrgID": jsonMap.ID, "Status": "Enabled", + "ID": jsonMap.ID, + "Filter": map[string]interface{}{"Prefix": jsonMap.Filter.Prefix}, + "NoncurrentVersionExpiration": map[string]interface{}{"NoncurrentDays": jsonMap.NoncurrentVersionExpiration.NoncurrentDays}, } if jsonMap.Expiration.Days != 0 && jsonMap.Expiration.ExpiredObjectDeleteMarker { return diag.FromErr(fmt.Errorf("Expiration.Days and Expiration.ExpiredObjectDeleteMarker cannot be set at the same time")) diff --git a/fptcloud/object-storage/resource_bucket_policy.go b/fptcloud/object-storage/resource_bucket_policy.go index 79cd064..9b0d8ac 100644 --- a/fptcloud/object-storage/resource_bucket_policy.go +++ b/fptcloud/object-storage/resource_bucket_policy.go @@ -18,12 +18,6 @@ func ResourceBucketPolicy() *schema.Resource { DeleteContext: resourceBucketPolicyDelete, ReadContext: dataSourceBucketPolicyRead, Schema: map[string]*schema.Schema{ - "vpc_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The VPC ID", 
- }, "region_name": { Type: schema.TypeString, Required: true, @@ -36,6 +30,17 @@ func ResourceBucketPolicy() *schema.Resource { ForceNew: true, Description: "Name of the bucket", }, + "vpc_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The VPC ID", + }, + "status": { + Type: schema.TypeBool, + Computed: true, + Description: "Status after bucket policy is created", + }, "policy": { Type: schema.TypeString, Optional: true, @@ -51,11 +56,6 @@ func ResourceBucketPolicy() *schema.Resource { Description: "Path to the JSON file containing the bucket policy", ConflictsWith: []string{"policy"}, }, - "status": { - Type: schema.TypeBool, - Computed: true, - Description: "Status after bucket policy is created", - }, }, } } @@ -82,7 +82,7 @@ func resourceBucketPolicyCreate(ctx context.Context, d *schema.ResourceData, m i s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", regionName)) + return diag.FromErr(fmt.Errorf(regionError, regionName)) } var jsonMap map[string]interface{} err := json.Unmarshal([]byte(policyContent), &jsonMap) @@ -119,7 +119,7 @@ func resourceBucketPolicyDelete(ctx context.Context, d *schema.ResourceData, m i regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } resp := service.PutBucketPolicy(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketPolicyRequest{ diff --git a/fptcloud/object-storage/resource_bucket_static_website.go b/fptcloud/object-storage/resource_bucket_static_website.go index cbc0c31..15cd1e5 100644 --- a/fptcloud/object-storage/resource_bucket_static_website.go +++ 
b/fptcloud/object-storage/resource_bucket_static_website.go @@ -73,7 +73,7 @@ func resourceBucketStaticWebsiteCreate(ctx context.Context, d *schema.ResourceDa errorDocument := d.Get("error_document_key").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } putBucketWebsite := service.PutBucketWebsite(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketWebsiteRequest{ Bucket: bucketName, @@ -104,7 +104,7 @@ func resourceDeleteBucketStaticWebsite(ctx context.Context, d *schema.ResourceDa regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } resp := service.DeleteBucketStaticWebsite(vpcId, s3ServiceDetail.S3ServiceId, bucketName) diff --git a/fptcloud/object-storage/resource_bucket_versioning.go b/fptcloud/object-storage/resource_bucket_versioning.go index e744ed4..d10afa3 100644 --- a/fptcloud/object-storage/resource_bucket_versioning.go +++ b/fptcloud/object-storage/resource_bucket_versioning.go @@ -57,7 +57,7 @@ func resourceBucketVersioningCreate(ctx context.Context, d *schema.ResourceData, regionName := d.Get("region_name").(string) s3ServiceDetail := getServiceEnableRegion(service, vpcId, regionName) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } err := service.PutBucketVersioning(vpcId, s3ServiceDetail.S3ServiceId, bucketName, BucketVersioningRequest{ Status: versioningStatus, diff --git 
a/fptcloud/object-storage/resource_sub_user.go b/fptcloud/object-storage/resource_sub_user.go index 254e21e..29e6f52 100644 --- a/fptcloud/object-storage/resource_sub_user.go +++ b/fptcloud/object-storage/resource_sub_user.go @@ -52,7 +52,7 @@ func resourceSubUserCreate(ctx context.Context, d *schema.ResourceData, m interf s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } err := objectStorageService.CreateSubUser(req, vpcId, s3ServiceDetail.S3ServiceId) @@ -76,7 +76,7 @@ func resourceSubUserDelete(ctx context.Context, d *schema.ResourceData, m interf vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } err := objectStorageService.DeleteSubUser(d.Id(), vpcId, s3ServiceDetail.S3ServiceId) if err != nil { diff --git a/fptcloud/object-storage/resource_sub_user_keys.go b/fptcloud/object-storage/resource_sub_user_keys.go index 411b707..34a7bd5 100644 --- a/fptcloud/object-storage/resource_sub_user_keys.go +++ b/fptcloud/object-storage/resource_sub_user_keys.go @@ -15,28 +15,28 @@ func ResourceSubUserKeys() *schema.Resource { ReadContext: resourceReadUserDetail, DeleteContext: resourceSubUserAccessKeyDelete, Schema: map[string]*schema.Schema{ - "vpc_id": { + "user_id": { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The VPC id that the S3 service belongs to", + Description: "The sub user id, can retrieve from data source `fptcloud_object_storage_sub_user`", }, - "region_name": { + "access_key": { 
Type: schema.TypeString, - Required: true, - ForceNew: true, - Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", + Computed: true, + Description: "The access key of the sub user", }, - "user_id": { + "vpc_id": { Type: schema.TypeString, Required: true, ForceNew: true, - Description: "The sub user id, can retrieve from data source `fptcloud_object_storage_sub_user`", + Description: "The VPC id that the S3 service belongs to", }, - "access_key": { + "region_name": { Type: schema.TypeString, - Computed: true, - Description: "The access key of the sub user", + Required: true, + ForceNew: true, + Description: "The region name that's are the same with the region name in the S3 service. Currently, we have: HCM-01, HCM-02, HN-01, HN-02", }, "secret_key": { Type: schema.TypeString, @@ -54,7 +54,7 @@ func resourceSubUserAccessKeyCreate(ctx context.Context, d *schema.ResourceData, subUserId := d.Get("user_id").(string) s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } resp := objectStorageService.CreateSubUserAccessKey(vpcId, s3ServiceDetail.S3ServiceId, subUserId) @@ -81,7 +81,7 @@ func resourceReadUserDetail(ctx context.Context, d *schema.ResourceData, m inter vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } subUserId := d.Get("user_id").(string) @@ -101,7 +101,7 @@ func resourceSubUserAccessKeyDelete(ctx context.Context, d 
*schema.ResourceData, vpcId := d.Get("vpc_id").(string) s3ServiceDetail := getServiceEnableRegion(objectStorageService, vpcId, d.Get("region_name").(string)) if s3ServiceDetail.S3ServiceId == "" { - return diag.FromErr(fmt.Errorf("region %s is not enabled", d.Get("region_name").(string))) + return diag.FromErr(fmt.Errorf(regionError, d.Get("region_name").(string))) } subUserId := d.Get("user_id").(string) accessKeyToDelete := d.Get("access_key").(string) diff --git a/main.go b/main.go index 1ec6d5f..6db0c26 100644 --- a/main.go +++ b/main.go @@ -25,7 +25,6 @@ func main() { "set to true to run the provider with support for debuggers", ) flag.Parse() - log.Printf("[DEBUG] Configuring provider...") providers := []func() tfprotov5.ProviderServer{ providerserver.NewProtocol5(fptcloud.NewXplatProvider("dev")()),