From 679194713e7699c7f5cca998a2c376a210f64a9e Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Mon, 4 Dec 2023 09:31:34 -0800 Subject: [PATCH 01/44] Remove obsolete methods and files (#9564) --- mmv1/api/compiler.rb | 3 - mmv1/api/resource.rb | 10 - mmv1/compile/core.rb | 5 - mmv1/provider/terraform.rb | 18 -- mmv1/templates/async.yaml.erb | 18 -- mmv1/templates/license.erb | 12 - mmv1/templates/stackdriver.json | 421 -------------------------------- 7 files changed, 487 deletions(-) delete mode 100644 mmv1/templates/async.yaml.erb delete mode 100644 mmv1/templates/license.erb delete mode 100644 mmv1/templates/stackdriver.json diff --git a/mmv1/api/compiler.rb b/mmv1/api/compiler.rb index 9e0eca812610..27a139afbc81 100644 --- a/mmv1/api/compiler.rb +++ b/mmv1/api/compiler.rb @@ -15,7 +15,6 @@ require 'api/product' require 'api/resource' require 'api/type' -require 'compile/core' require 'google/yaml_validator' module Api @@ -23,8 +22,6 @@ module Api class Compiler include Compile::Core - attr_reader :product - def initialize(catalog) @catalog = catalog end diff --git a/mmv1/api/resource.rb b/mmv1/api/resource.rb index 4ccec699fb8d..1fd7d2a9711b 100644 --- a/mmv1/api/resource.rb +++ b/mmv1/api/resource.rb @@ -619,16 +619,6 @@ def collection_uri @base_url end - def async_operation_url - [@__product.base_url, async_operation_uri].flatten.join - end - - def async_operation_uri - raise 'Not an async resource' if async.nil? 
- - async.operation.base_url - end - def full_create_url [@__product.base_url, create_uri].flatten.join end diff --git a/mmv1/compile/core.rb b/mmv1/compile/core.rb index e90cc9c73462..2bdcf89f18be 100644 --- a/mmv1/compile/core.rb +++ b/mmv1/compile/core.rb @@ -16,11 +16,6 @@ require 'ostruct' module Compile - # Unique ID for the Google libraries to be compiled/used by modules - module Libraries - NETWORK = 'network'.freeze - end - # Helper functions to aid compiling and including files module Core def compiler diff --git a/mmv1/provider/terraform.rb b/mmv1/provider/terraform.rb index f409f0c84967..20c513bf5d6e 100644 --- a/mmv1/provider/terraform.rb +++ b/mmv1/provider/terraform.rb @@ -619,28 +619,10 @@ def titlelize_property(property) # that should not be exposed outside the object hierarchy. private - def generate_requires(properties, requires = []) - requires.concat(properties.collect(&:requires)) - end - def provider_name self.class.name.split('::').last.downcase end - # Determines the copyright year. If the file already exists we'll attempt to - # recognize the copyright year, and if it finds it will keep it. - def effective_copyright_year(out_file) - copyright_mask = /# Copyright (?[0-9-]*) Google Inc./ - if File.exist?(out_file) - first_line = File.read(out_file).split("\n") - .select { |l| copyright_mask.match(l) } - .first - matcher = copyright_mask.match(first_line) - return matcher[:year] unless matcher.nil? 
- end - Time.now.year - end - # Adapted from the method used in templating # See: mmv1/compile/core.rb def comment_block(text, lang) diff --git a/mmv1/templates/async.yaml.erb b/mmv1/templates/async.yaml.erb deleted file mode 100644 index 568a5b7516ae..000000000000 --- a/mmv1/templates/async.yaml.erb +++ /dev/null @@ -1,18 +0,0 @@ -async: !ruby/object:Api::OpAsync - operation: !ruby/object:Api::OpAsync::Operation - kind: 'sql#operation' - path: 'name' - base_url: 'projects/{{project}}/operations/{{op_id}}' - wait_ms: 1000 - result: !ruby/object:Api::OpAsync::Result - path: 'targetLink' - status: !ruby/object:Api::OpAsync::Status - path: 'status' - complete: 'DONE' - allowed: - - 'PENDING' - - 'RUNNING' - - 'DONE' - error: !ruby/object:Api::OpAsync::Error - path: 'error/errors' - message: 'message' diff --git a/mmv1/templates/license.erb b/mmv1/templates/license.erb deleted file mode 100644 index 2fd5d2f92d29..000000000000 --- a/mmv1/templates/license.erb +++ /dev/null @@ -1,12 +0,0 @@ -# Copyright <%= effective_copyright_year(out_file) -%> Google Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/mmv1/templates/stackdriver.json b/mmv1/templates/stackdriver.json deleted file mode 100644 index 33430fbacff5..000000000000 --- a/mmv1/templates/stackdriver.json +++ /dev/null @@ -1,421 +0,0 @@ -{ - "parameters": { - "key": { - "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. 
Required unless you provide an OAuth 2.0 token.", - "type": "string", - "location": "query" - }, - "access_token": { - "location": "query", - "description": "OAuth access token.", - "type": "string" - }, - "upload_protocol": { - "location": "query", - "description": "Upload protocol for media (e.g. \"raw\", \"multipart\").", - "type": "string" - }, - "prettyPrint": { - "location": "query", - "description": "Returns response with indentations and line breaks.", - "type": "boolean", - "default": "true" - }, - "quotaUser": { - "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.", - "type": "string", - "location": "query" - }, - "uploadType": { - "location": "query", - "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").", - "type": "string" - }, - "fields": { - "type": "string", - "location": "query", - "description": "Selector specifying which fields to include in a partial response." - }, - "$.xgafv": { - "description": "V1 error format.", - "type": "string", - "enumDescriptions": [ - "v1 error format", - "v2 error format" - ], - "location": "query", - "enum": [ - "1", - "2" - ] - }, - "oauth_token": { - "description": "OAuth 2.0 token for the current user.", - "type": "string", - "location": "query" - }, - "callback": { - "description": "JSONP", - "type": "string", - "location": "query" - }, - "alt": { - "default": "json", - "enum": [ - "json", - "media", - "proto" - ], - "type": "string", - "enumDescriptions": [ - "Responses with Content-Type of application/json", - "Media download with context-dependent Content-Type", - "Responses with Content-Type of application/x-protobuf" - ], - "location": "query", - "description": "Data format for response." 
- } - }, - "version": "v2", - "baseUrl": "https://stackdriver.googleapis.com/", - "kind": "discovery#restDescription", - "description": "Provides users with programmatic access to Stackdriver endpoints that allow putting VM instances and other resources into maintenance mode.", - "servicePath": "", - "basePath": "", - "revision": "20200323", - "documentationLink": "https://cloud.google.com/stackdriver/docs/", - "id": "stackdriver:v2", - "discoveryVersion": "v1", - "version_module": true, - "schemas": { - "Status": { - "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors).", - "type": "object", - "properties": { - "details": { - "type": "array", - "items": { - "type": "object", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - } - }, - "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." - }, - "code": { - "description": "The status code, which should be an enum value of google.rpc.Code.", - "format": "int32", - "type": "integer" - }, - "message": { - "description": "A developer-facing error message, which should be in English. 
Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.", - "type": "string" - } - }, - "id": "Status" - }, - "Operation": { - "id": "Operation", - "description": "This resource represents a long-running operation that is the result of a\nnetwork API call.", - "type": "object", - "properties": { - "response": { - "description": "The normal response of the operation in case of success. If the original\nmethod returns no data on success, such as `Delete`, the response is\n`google.protobuf.Empty`. If the original method is standard\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmethods, the response should have the type `XxxResponse`, where `Xxx`\nis the original method name. For example, if the original method name\nis `TakeSnapshot()`, the inferred response type is\n`TakeSnapshotResponse`.", - "type": "object", - "additionalProperties": { - "description": "Properties of the object. Contains field @type with type URL.", - "type": "any" - } - }, - "name": { - "description": "The server-assigned name, which is only unique within the same service that\noriginally returns it. If you use the default HTTP mapping, the\n`name` should be a resource name ending with `operations/{unique_id}`.", - "type": "string" - }, - "error": { - "$ref": "Status", - "description": "The error result of the operation in case of failure or cancellation." - }, - "metadata": { - "type": "object", - "additionalProperties": { - "type": "any", - "description": "Properties of the object. Contains field @type with type URL." - }, - "description": "Service-specific metadata associated with the operation. It typically\ncontains progress information and common metadata such as create time.\nSome services might not provide such metadata. Any method that returns a\nlong-running operation should document the metadata type, if any." 
- }, - "done": { - "description": "If the value is `false`, it means the operation is still in progress.\nIf `true`, the operation is completed, and either `error` or `response` is\navailable.", - "type": "boolean" - } - } - }, - "OperationMetadata": { - "id": "OperationMetadata", - "description": "Contains metadata for longrunning operations in the Stackdriver API.", - "type": "object", - "properties": { - "state": { - "enum": [ - "STATE_UNSPECIFIED", - "CREATED", - "RUNNING", - "DONE", - "CANCELLED" - ], - "description": "Current state of the batch operation.", - "type": "string", - "enumDescriptions": [ - "Invalid.", - "Request is received.", - "Request is actively being processed.", - "The batch processing is done.", - "The batch processing was cancelled." - ] - }, - "updateTime": { - "type": "string", - "description": "The time when the operation result was last updated.", - "format": "google-datetime" - }, - "createTime": { - "description": "The time when the batch request was received.", - "format": "google-datetime", - "type": "string" - } - } - }, - "MonitoredProject": { - "description": "A single cloud account being monitored within a Stackdriver account.", - "type": "object", - "properties": { - "projectNumber": { - "description": "Output only. The GCP-assigned project number.", - "format": "int64", - "type": "string" - }, - "createTime": { - "description": "Output only. The instant when this monitored project was created.", - "format": "google-datetime", - "type": "string" - }, - "updateTime": { - "description": "Output only. The instant when this monitored project was last updated.", - "format": "google-datetime", - "type": "string" - }, - "name": { - "description": "The resource name of the monitored project within a Stackdriver account.\nIncludes the host project id and monitored project id. 
On output it\nwill always contain the project number.\nExample: \u003ccode\u003eaccounts/my-project/projects/my-other-project\u003c/code\u003e", - "type": "string" - }, - "projectId": { - "description": "Output only. The GCP-assigned project id.\nExample: \u003ccode\u003eprojecty-project-101\u003c/code\u003e", - "type": "string" - }, - "organizationId": { - "description": "Optional, input only. The Id of the organization to hold the GCP Project\nfor a newly created monitored project.\nThis field is ignored if the GCP project already exists.", - "type": "string" - } - }, - "id": "MonitoredProject" - }, - "StackdriverAccount": { - "id": "StackdriverAccount", - "description": "A Workspace in Stackdriver Monitoring, which specifies one or more GCP\nprojects and zero or more AWS accounts to monitor together.\nOne GCP project acts as the Workspace's host.\nGCP projects and AWS accounts cannot be monitored until they are associated\nwith a Workspace.", - "type": "object", - "properties": { - "monitoredProjects": { - "description": "Output only. The GCP projects monitored in this Stackdriver account.", - "type": "array", - "items": { - "$ref": "MonitoredProject" - } - }, - "createTime": { - "type": "string", - "description": "Output only. The instant when this account was created.", - "format": "google-datetime" - }, - "hostProjectId": { - "description": "Output only. The GCP project id for the host project of this account.", - "type": "string" - }, - "updateTime": { - "description": "Output only. The instant when this account record was last updated.", - "format": "google-datetime", - "type": "string" - }, - "hostProjectNumber": { - "description": "Output only. The GCP project number for the host project of this account.", - "format": "int64", - "type": "string" - }, - "name": { - "description": "The resource name of the Stackdriver account, including the host project\nid or number. 
On output it will always be the host project number.\nExample: \u003ccode\u003eaccounts/[PROJECT_ID]\u003c/code\u003e or\n\u003ccode\u003eaccounts/[PROJECT_NUMBER]\u003c/code\u003e", - "type": "string" - }, - "organizationId": { - "description": "Optional, input only. The Id of the organization to hold the GCP Project\nfor a newly created Stackdriver account.\nThis field is ignored if the GCP project already exists.", - "type": "string" - } - } - } - }, - "protocol": "rest", - "icons": { - "x32": "http://www.google.com/images/icons/product/search-32.gif", - "x16": "http://www.google.com/images/icons/product/search-16.gif" - }, - "canonicalName": "Stackdriver", - "auth": { - "oauth2": { - "scopes": { - "https://www.googleapis.com/auth/monitoring": { - "description": "View and write monitoring data for all of your Google and third-party Cloud and API projects" - }, - "https://www.googleapis.com/auth/monitoring.write": { - "description": "Publish metric data to your Google Cloud projects" - }, - "https://www.googleapis.com/auth/cloud-platform": { - "description": "View and manage your data across Google Cloud Platform services" - }, - "https://www.googleapis.com/auth/monitoring.read": { - "description": "View monitoring data for all of your Google Cloud and third-party projects" - } - } - } - }, - "rootUrl": "https://stackdriver.googleapis.com/", - "ownerDomain": "google.com", - "name": "stackdriver", - "batchPath": "batch", - "mtlsRootUrl": "https://stackdriver.mtls.googleapis.com/", - "fullyEncodeReservedExpansion": true, - "title": "Stackdriver API", - "ownerName": "Google", - "resources": { - "accounts": { - "methods": { - "get": { - "httpMethod": "GET", - "parameterOrder": [ - "name" - ], - "response": { - "$ref": "StackdriverAccount" - }, - "parameters": { - "name": { - "location": "path", - "description": "The unique name of the Stackdriver account.\nCaller needs stackdriver.projects.get permission on the host project.", - "required": true, - "type": "string", 
- "pattern": "^accounts/[^/]+$" - }, - "includeProjects": { - "type": "boolean", - "location": "query", - "description": "If true the monitored_projects collection will be populated with any\nentries, if false it will be empty." - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "flatPath": "v2/accounts/{accountsId}", - "id": "stackdriver.accounts.get", - "path": "v2/{+name}", - "description": "Fetches a specific Stackdriver account." - }, - "create": { - "response": { - "$ref": "Operation" - }, - "parameterOrder": [], - "httpMethod": "POST", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.write" - ], - "parameters": {}, - "flatPath": "v2/accounts", - "path": "v2/accounts", - "id": "stackdriver.accounts.create", - "request": { - "$ref": "StackdriverAccount" - }, - "description": "Creates a new Stackdriver account with a given host project.\nA MonitoredProject for that project will be attached to it if successful.\n\nOperation\u003cresponse: StackdriverAccount\u003e" - } - }, - "resources": { - "projects": { - "methods": { - "create": { - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "parent" - ], - "httpMethod": "POST", - "parameters": { - "parent": { - "description": "The unique name of the Stackdriver account that will host this project.\nCaller needs stackdriver.projects.edit permission on the host project.", - "required": true, - "type": "string", - "pattern": "^accounts/[^/]+$", - "location": "path" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.write" - ], - "flatPath": "v2/accounts/{accountsId}/projects", - "path": "v2/{+parent}/projects", - "id": 
"stackdriver.accounts.projects.create", - "description": "Creates a new monitored project in a Stackdriver account.\nOperation\u003cresponse: MonitoredProject\u003e", - "request": { - "$ref": "MonitoredProject" - } - } - } - } - } - }, - "operations": { - "methods": { - "get": { - "httpMethod": "GET", - "response": { - "$ref": "Operation" - }, - "parameterOrder": [ - "name" - ], - "parameters": { - "name": { - "pattern": "^operations/.*$", - "location": "path", - "description": "The name of the operation resource.", - "required": true, - "type": "string" - } - }, - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/monitoring.read" - ], - "flatPath": "v2/operations/{operationsId}", - "id": "stackdriver.operations.get", - "path": "v2/{+name}", - "description": "Gets the latest state of a long-running operation. Clients can use this\nmethod to poll the operation result at intervals as recommended by the API\nservice." 
- } - } - } - } -} \ No newline at end of file From 8bad5239bd5adeb90f8d6706d31431f64fc09542 Mon Sep 17 00:00:00 2001 From: Scott Suarez Date: Mon, 4 Dec 2023 10:16:00 -0800 Subject: [PATCH 02/44] Update enrolled_teams.yml 12/1/23 (#9568) --- tools/issue-labeler/labeler/enrolled_teams.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/issue-labeler/labeler/enrolled_teams.yml b/tools/issue-labeler/labeler/enrolled_teams.yml index 112c8212a205..0fb80fd35430 100755 --- a/tools/issue-labeler/labeler/enrolled_teams.yml +++ b/tools/issue-labeler/labeler/enrolled_teams.yml @@ -35,6 +35,9 @@ service/apigateway: service/apigee: resources: - google_apigee_.* +service/apikeys: + resources: + - google_apikeys_key service/artifactregistry: resources: - google_artifact_registry_.* @@ -285,6 +288,9 @@ service/deploymentmanager: service/dialogflow: resources: - google_dialogflow_.* +service/discoveryengine: + resources: + - google_discoveryengine_.* service/dlp: resources: - google_data_loss_prevention_.* @@ -520,6 +526,9 @@ service/service-networking: service/servicedirectory: resources: - google_service_directory_.* +service/serviceusage: + resources: + - google_project_service service/serviceusage-quota: resources: - google_service_usage_consumer_quota_override From 9a32d4cb39052f3f49939325dc589f1302b95a1d Mon Sep 17 00:00:00 2001 From: Julio Castillo Date: Mon, 4 Dec 2023 19:24:42 +0100 Subject: [PATCH 03/44] Document `dns_name` and `psc_service_attachment_link` for Cloud SQL instances. 
(#9517) --- .../website/docs/r/sql_database_instance.html.markdown | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown index 027776ab0922..fc057586edbc 100644 --- a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown @@ -517,6 +517,8 @@ exported: * `connection_name` - The connection name of the instance to be used in connection strings. For example, when connecting with [Cloud SQL Proxy](https://cloud.google.com/sql/docs/mysql/connect-admin-proxy). +* `dsn_name` - The DNS name of the instance. See [Connect to an instance using Private Service Connect](https://cloud.google.com/sql/docs/mysql/configure-private-service-connect#view-summary-information-cloud-sql-instances-psc-enabled) for more details. + * `service_account_email_address` - The service account email address assigned to the instance. @@ -549,6 +551,8 @@ a workaround for an [issue fixed in Terraform 0.12](https://github.com/hashicorp but also provides a convenient way to access an IP of a specific type without performing filtering in a Terraform config. +* `psc_service_attachment_link` - the URI that points to the service attachment of the instance. + * `instance_type` - The type of the instance. The supported values are `SQL_INSTANCE_TYPE_UNSPECIFIED`, `CLOUD_SQL_INSTANCE`, `ON_PREMISES_INSTANCE` and `READ_REPLICA_INSTANCE`. ~> **NOTE:** Users can upgrade a read replica instance to a stand-alone Cloud SQL instance with the help of `instance_type`. To promote, users have to set the `instance_type` property as `CLOUD_SQL_INSTANCE` and remove/unset `master_instance_name` and `replica_configuration` from instance configuration. This operation might cause your instance to restart. 
From 6d9fb147f6f82a4be6820e179a66e7f883885be6 Mon Sep 17 00:00:00 2001 From: Swamita Gupta <55314843+swamitagupta@users.noreply.github.com> Date: Tue, 5 Dec 2023 01:00:46 +0530 Subject: [PATCH 04/44] Add Nsx credentials datasource to vmwareengine (#9533) * Add Network and PC bootstrap functions * Add NSX Credentials to Vmwareengine --- .../provider/provider_mmv1_resources.go.erb | 1 + ...rce_google_vmwareengine_nsx_credentials.go | 90 +++++++++++++++++++ ...esource_vmwareengine_private_cloud_test.go | 26 +++++- ...vmwareengine_nsx_credentials.html.markdown | 33 +++++++ 4 files changed, 149 insertions(+), 1 deletion(-) create mode 100644 mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_nsx_credentials.go create mode 100644 mmv1/third_party/terraform/website/docs/d/vmwareengine_nsx_credentials.html.markdown diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index d2a58b745435..f05ea5d6faba 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -205,6 +205,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_vmwareengine_network": vmwareengine.DataSourceVmwareengineNetwork(), "google_vmwareengine_network_peering": vmwareengine.DataSourceVmwareengineNetworkPeering(), "google_vmwareengine_network_policy": vmwareengine.DataSourceVmwareengineNetworkPolicy(), + "google_vmwareengine_nsx_credentials": vmwareengine.DataSourceVmwareengineNsxCredentials(), "google_vmwareengine_private_cloud": vmwareengine.DataSourceVmwareenginePrivateCloud(), // ####### END handwritten datasources ########### diff --git a/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_nsx_credentials.go b/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_nsx_credentials.go new file mode 100644 index 
000000000000..2a22d6db7eb2 --- /dev/null +++ b/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_nsx_credentials.go @@ -0,0 +1,90 @@ +package vmwareengine + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceVmwareengineNsxCredentials() *schema.Resource { + return &schema.Resource{ + Read: dataSourceVmwareengineNsxCredentialsRead, + Schema: map[string]*schema.Schema{ + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name of the private cloud which contains NSX. +Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. +For example: projects/my-project/locations/us-west1-a/privateClouds/my-cloud`, + }, + "username": { + Type: schema.TypeString, + Computed: true, + Description: `Initial username.`, + }, + "password": { + Type: schema.TypeString, + Computed: true, + Description: `Initial password.`, + }, + }, + } +} + +func dataSourceVmwareengineNsxCredentialsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VmwareengineBasePath}}{{parent}}:showNsxCredentials") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorAbortPredicates: 
[]transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("VmwareengineNsxCredentials %q", d.Id())) + } + + if err := d.Set("username", flattenVmwareengineNsxCredentailsUsername(res["username"], d, config)); err != nil { + return fmt.Errorf("Error reading NsxCredentails: %s", err) + } + if err := d.Set("password", flattenVmwareengineNsxCredentailsPassword(res["password"], d, config)); err != nil { + return fmt.Errorf("Error reading NsxCredentails: %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}:nsx-credentials") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return nil +} + +func flattenVmwareengineNsxCredentailsUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVmwareengineNsxCredentailsPassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} diff --git a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go index 563dfa8d3f86..54ede7d89284 100644 --- a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go +++ b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go @@ -30,6 +30,7 @@ func TestAccVmwareenginePrivateCloud_vmwareEnginePrivateCloudUpdate(t *testing.T Config: testPrivateCloudUpdateConfig(context, "description1", 3), Check: resource.ComposeTestCheckFunc( acctest.CheckDataSourceStateMatchesResourceStateWithIgnores("data.google_vmwareengine_private_cloud.ds", "google_vmwareengine_private_cloud.vmw-engine-pc", map[string]struct{}{}), + testAccCheckGoogleVmwareengineNsxCredentialsMeta("data.google_vmwareengine_nsx_credentials.nsx-ds"), ), }, { @@ -90,7 +91,7 @@ 
resource "google_vmwareengine_private_cloud" "vmw-engine-pc" { } } -data "google_vmwareengine_private_cloud" ds { +data "google_vmwareengine_private_cloud" "ds" { location = "%{region}-a" name = "tf-test-sample-pc%{random_suffix}" depends_on = [ @@ -98,9 +99,32 @@ data "google_vmwareengine_private_cloud" ds { ] } +# NSX Credentials is a child datasource of PC and is included in the PC test due to the high deployment time involved in the Creation and deletion of a PC +data "google_vmwareengine_nsx_credentials" "nsx-ds" { + parent = google_vmwareengine_private_cloud.vmw-engine-pc +} + `, context) } +func testAccCheckGoogleVmwareengineNsxCredentialsMeta(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Can't find nsx credentials data source: %s", n) + } + _, ok = rs.Primary.Attributes["username"] + if !ok { + return fmt.Errorf("can't find 'username' attribute in data source: %s", n) + } + _, ok = rs.Primary.Attributes["password"] + if !ok { + return fmt.Errorf("can't find 'password' attribute in data source: %s", n) + } + return nil + } +} + func testAccCheckVmwareenginePrivateCloudDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { diff --git a/mmv1/third_party/terraform/website/docs/d/vmwareengine_nsx_credentials.html.markdown b/mmv1/third_party/terraform/website/docs/d/vmwareengine_nsx_credentials.html.markdown new file mode 100644 index 000000000000..36001b3d5ca1 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/vmwareengine_nsx_credentials.html.markdown @@ -0,0 +1,33 @@ +--- +subcategory: "Cloud VMware Engine" +description: |- + Get NSX Credentials of a Private Cloud. +--- + +# google\_vmwareengine\_nsx_credentials + +Use this data source to get NSX credentials for a Private Cloud. 
+ +To get more information about private cloud NSX credentials, see: +* [API documentation](https://cloud.google.com/vmware-engine/docs/reference/rest/v1/projects.locations.privateClouds/showNsxCredentials) + +## Example Usage + +```hcl +data "google_vmwareengine_nsx_credentials" "ds" { + parent = "projects/my-project/locations/us-west1-a/privateClouds/my-cloud" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `parent` - (Required) The resource name of the private cloud which contains the NSX. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `username` - The username of the NSX Credential. +* `password` - The password of the NSX Credential. \ No newline at end of file From 3f4a82e143a3eeaa41e74e7932f63359d741ae27 Mon Sep 17 00:00:00 2001 From: Raz Amir <88726761+ramir-savvy@users.noreply.github.com> Date: Mon, 4 Dec 2023 21:44:16 +0200 Subject: [PATCH 05/44] Retry on firestore index create 409 with 'underlying data changed' (#9570) --- mmv1/products/firestore/Index.yaml | 2 +- .../terraform/transport/error_retry_predicates.go | 10 +++++++--- .../transport/error_retry_predicates_test.go | 13 ++++++++++++- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/mmv1/products/firestore/Index.yaml b/mmv1/products/firestore/Index.yaml index 877cdca86cb3..4bf078f1b521 100644 --- a/mmv1/products/firestore/Index.yaml +++ b/mmv1/products/firestore/Index.yaml @@ -17,7 +17,7 @@ base_url: projects/{{project}}/databases/{{database}}/collectionGroups/{{collect self_link: '{{name}}' immutable: true error_retry_predicates: - ["transport_tpg.FirestoreIndex409CrossTransactionContetion"] + ["transport_tpg.FirestoreIndex409Retry"] description: | Cloud Firestore indexes enable simple and complex queries against documents in a database. 
This resource manages composite indexes and not single diff --git a/mmv1/third_party/terraform/transport/error_retry_predicates.go b/mmv1/third_party/terraform/transport/error_retry_predicates.go index c37cce72c484..d023c0b47b19 100644 --- a/mmv1/third_party/terraform/transport/error_retry_predicates.go +++ b/mmv1/third_party/terraform/transport/error_retry_predicates.go @@ -331,11 +331,15 @@ func FirestoreField409RetryUnderlyingDataChanged(err error) (bool, string) { } // relevant for firestore in datastore mode -func FirestoreIndex409CrossTransactionContetion(err error) (bool, string) { - if gerr, ok := err.(*googleapi.Error); ok { - if gerr.Code == 409 && strings.Contains(gerr.Body, "Aborted due to cross-transaction contention") { +func FirestoreIndex409Retry(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 409 { + if strings.Contains(gerr.Body, "Aborted due to cross-transaction contention") { return true, "aborted due to cross-transaction contention - retrying" } + + if strings.Contains(gerr.Body, "Please retry, underlying data changed") { + return true, "underlying data changed - retrying" + } } return false, "" } diff --git a/mmv1/third_party/terraform/transport/error_retry_predicates_test.go b/mmv1/third_party/terraform/transport/error_retry_predicates_test.go index 317a7b0da9e9..a097e8219e5b 100644 --- a/mmv1/third_party/terraform/transport/error_retry_predicates_test.go +++ b/mmv1/third_party/terraform/transport/error_retry_predicates_test.go @@ -187,7 +187,18 @@ func TestFirestoreIndex409_crossTransactionContetion(t *testing.T) { Code: 409, Body: "Aborted due to cross-transaction contention", } - isRetryable, _ := FirestoreIndex409CrossTransactionContetion(&err) + isRetryable, _ := FirestoreIndex409Retry(&err) + if !isRetryable { + t.Errorf("Error not detected as retryable") + } +} + +func TestFirestoreIndex409_retryUnderlyingDataChanged(t *testing.T) { + err := googleapi.Error{ + Code: 409, + Body: "Please retry, 
underlying data changed", + } + isRetryable, _ := FirestoreIndex409Retry(&err) if !isRetryable { t.Errorf("Error not detected as retryable") } From 3a85747d20f2e7f4be3cba8e428073f70fe7be2b Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Mon, 4 Dec 2023 13:44:29 -0600 Subject: [PATCH 06/44] Skip VCR test TestAccLoggingProjectSink_updatePreservesCustomWriter (#9576) --- .../services/logging/resource_logging_project_sink_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mmv1/third_party/terraform/services/logging/resource_logging_project_sink_test.go b/mmv1/third_party/terraform/services/logging/resource_logging_project_sink_test.go index c8bdcbebddc7..d89af08acdd1 100644 --- a/mmv1/third_party/terraform/services/logging/resource_logging_project_sink_test.go +++ b/mmv1/third_party/terraform/services/logging/resource_logging_project_sink_test.go @@ -165,6 +165,8 @@ func TestAccLoggingProjectSink_updatePreservesUniqueWriter(t *testing.T) { } func TestAccLoggingProjectSink_updatePreservesCustomWriter(t *testing.T) { + // Investigating failure reason, skipping in VCR for now + acctest.SkipIfVcr(t) t.Parallel() sinkName := "tf-test-sink-" + acctest.RandString(t, 10) From 482b1ab5d76a240ab9777e03535276ee7cf0df19 Mon Sep 17 00:00:00 2001 From: Aaron Liberatore Date: Mon, 4 Dec 2023 12:47:13 -0800 Subject: [PATCH 07/44] [GKE Hub]: Add Fleet binary authorization config (#9545) --- mmv1/products/gkehub2/Fleet.yaml | 22 +++++++++++++- .../resource_gke_hub_fleet_test.go.erb | 30 ++++++++++++++++++- 2 files changed, 50 insertions(+), 2 deletions(-) diff --git a/mmv1/products/gkehub2/Fleet.yaml b/mmv1/products/gkehub2/Fleet.yaml index 0c9de65f03b6..88f2b389233c 100644 --- a/mmv1/products/gkehub2/Fleet.yaml +++ b/mmv1/products/gkehub2/Fleet.yaml @@ -99,6 +99,27 @@ properties: name: "defaultClusterConfig" description: The default cluster configurations to apply across the fleet. 
properties: + - !ruby/object:Api::Type::NestedObject + name: "binaryAuthorizationConfig" + description: Enable/Disable binary authorization features for the cluster. + properties: + - !ruby/object:Api::Type::Enum + name: "evaluationMode" + description: Mode of operation for binauthz policy evaluation. + values: + - DISABLED + - POLICY_BINDINGS + - !ruby/object:Api::Type::Array + name: "policyBindings" + description: Binauthz policies that apply to this cluster. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: "name" + description: | + The relative resource name of the binauthz platform policy to audit. GKE + platform policies have the following format: + `projects/{project_number}/platforms/gke/policies/{policy_id}`. - !ruby/object:Api::Type::NestedObject name: "securityPostureConfig" description: Enable/Disable Security Posture features for the cluster. @@ -109,7 +130,6 @@ properties: values: - DISABLED - BASIC - - ENTERPRISE - !ruby/object:Api::Type::Enum name: "vulnerabilityMode" description: Sets which mode to use for vulnerability scanning. 
diff --git a/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_fleet_test.go.erb b/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_fleet_test.go.erb index d0f91aca472a..ca516876d0a7 100755 --- a/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_fleet_test.go.erb +++ b/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_fleet_test.go.erb @@ -48,6 +48,14 @@ func TestAccGKEHub2Fleet_gkehubFleetBasicExample_update(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccGKEHub2Fleet_removedDefaultClusterConfig(context), + }, + { + ResourceName: "google_gke_hub_fleet.default", + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -58,6 +66,9 @@ resource "google_gke_hub_fleet" "default" { project = google_project.project.project_id display_name = "my production fleet" default_cluster_config { + binary_authorization_config { + evaluation_mode = "DISABLED" + } security_posture_config { mode = "DISABLED" vulnerability_mode = "VULNERABILITY_DISABLED" @@ -72,8 +83,14 @@ func testAccGKEHub2Fleet_update(context map[string]interface{}) string { return gkeHubFleetProjectSetupForGA(context) + acctest.Nprintf(` resource "google_gke_hub_fleet" "default" { project = google_project.project.project_id - display_name = "my staging fleet" + display_name = "my updated fleet" default_cluster_config { + binary_authorization_config { + evaluation_mode = "POLICY_BINDINGS" + policy_bindings { + name = "projects/${google_project.project.project_id}/platforms/gke/policies/policy_id" + } + } security_posture_config { mode = "BASIC" vulnerability_mode = "VULNERABILITY_BASIC" @@ -84,6 +101,17 @@ resource "google_gke_hub_fleet" "default" { `, context) } +func testAccGKEHub2Fleet_removedDefaultClusterConfig(context map[string]interface{}) string { + return gkeHubFleetProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_fleet" "default" { + project = google_project.project.project_id + display_name = 
"my updated fleet" + + depends_on = [time_sleep.wait_for_gkehub_enablement] +} +`, context) +} + func gkeHubFleetProjectSetupForGA(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_project" "project" { From 3005b4c8c20569581358b99ab7b77aefe485fcc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Wiktor=20Niesiob=C4=99dzki?= Date: Mon, 4 Dec 2023 22:49:44 +0100 Subject: [PATCH 08/44] Cloud Run v2 limits examples and docs (#9563) * Cloud Run v2 limits examples and docs Improve documentation for limits and provide examples how they can be used in Cloud Run Service and Job. * Fix limits declaration * Remove primary_resource_name --- mmv1/products/cloudrunv2/Job.yaml | 7 +++++- mmv1/products/cloudrunv2/Service.yaml | 7 +++++- .../examples/cloudrunv2_job_limits.tf.erb | 24 +++++++++++++++++++ .../examples/cloudrunv2_service_limits.tf.erb | 17 +++++++++++++ 4 files changed, 53 insertions(+), 2 deletions(-) create mode 100644 mmv1/templates/terraform/examples/cloudrunv2_job_limits.tf.erb create mode 100644 mmv1/templates/terraform/examples/cloudrunv2_service_limits.tf.erb diff --git a/mmv1/products/cloudrunv2/Job.yaml b/mmv1/products/cloudrunv2/Job.yaml index fe8509b65791..176c056e5715 100644 --- a/mmv1/products/cloudrunv2/Job.yaml +++ b/mmv1/products/cloudrunv2/Job.yaml @@ -58,6 +58,11 @@ examples: ])" vars: cloud_run_job_name: 'cloudrun-job' + - !ruby/object:Provider::Terraform::Examples + name: 'cloudrunv2_job_limits' + primary_resource_id: 'default' + vars: + cloud_run_job_name: 'cloudrun-job' - !ruby/object:Provider::Terraform::Examples name: 'cloudrunv2_job_sql' primary_resource_id: 'default' @@ -337,7 +342,7 @@ properties: - !ruby/object:Api::Type::KeyValuePairs name: 'limits' description: |- - Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. 
The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + Only memory and CPU are supported. Use key `cpu` for CPU limit and `memory` for memory limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go default_from_api: true - !ruby/object:Api::Type::Array name: 'ports' diff --git a/mmv1/products/cloudrunv2/Service.yaml b/mmv1/products/cloudrunv2/Service.yaml index a1cede779aad..07bdcba208f4 100644 --- a/mmv1/products/cloudrunv2/Service.yaml +++ b/mmv1/products/cloudrunv2/Service.yaml @@ -61,6 +61,11 @@ examples: random_suffix\"])" vars: cloud_run_service_name: 'cloudrun-service' + - !ruby/object:Provider::Terraform::Examples + name: 'cloudrunv2_service_limits' + primary_resource_id: 'default' + vars: + cloud_run_service_name: 'cloudrun-service' - !ruby/object:Provider::Terraform::Examples name: 'cloudrunv2_service_sql' primary_resource_id: 'default' @@ -421,7 +426,7 @@ properties: - !ruby/object:Api::Type::KeyValuePairs name: 'limits' description: |- - Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + Only memory and CPU are supported. Use key `cpu` for CPU limit and `memory` for memory limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. 
The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go default_from_api: true - !ruby/object:Api::Type::Boolean name: 'cpuIdle' diff --git a/mmv1/templates/terraform/examples/cloudrunv2_job_limits.tf.erb b/mmv1/templates/terraform/examples/cloudrunv2_job_limits.tf.erb new file mode 100644 index 000000000000..52c1523ac7d7 --- /dev/null +++ b/mmv1/templates/terraform/examples/cloudrunv2_job_limits.tf.erb @@ -0,0 +1,24 @@ +resource "google_cloud_run_v2_job" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['cloud_run_job_name'] %>" + location = "us-central1" + + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + resources { + limits = { + cpu = "2" + memory = "1024Mi" + } + } + } + } + } + + lifecycle { + ignore_changes = [ + launch_stage, + ] + } +} diff --git a/mmv1/templates/terraform/examples/cloudrunv2_service_limits.tf.erb b/mmv1/templates/terraform/examples/cloudrunv2_service_limits.tf.erb new file mode 100644 index 000000000000..30476a53159e --- /dev/null +++ b/mmv1/templates/terraform/examples/cloudrunv2_service_limits.tf.erb @@ -0,0 +1,17 @@ +resource "google_cloud_run_v2_service" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['cloud_run_service_name'] %>" + location = "us-central1" + ingress = "INGRESS_TRAFFIC_ALL" + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + resources { + limits = { + cpu = "2" + memory = "1024Mi" + } + } + } + } +} From 9344002d0782ca87f6df9d79122d5d4882bd1cac Mon Sep 17 00:00:00 2001 From: Will Yardley Date: Mon, 4 Dec 2023 14:32:09 -0800 Subject: [PATCH 09/44] docs: remove app engine warning on `cloud_tasks_queue` (#8342) --- mmv1/products/cloudtasks/Queue.yaml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/mmv1/products/cloudtasks/Queue.yaml b/mmv1/products/cloudtasks/Queue.yaml index 
511130752b00..a6b985f8db48 100644 --- a/mmv1/products/cloudtasks/Queue.yaml +++ b/mmv1/products/cloudtasks/Queue.yaml @@ -27,12 +27,6 @@ iam_policy: !ruby/object:Api::Resource::IamPolicy id_format: 'projects/{{project}}/locations/{{location}}/queues/{{name}}' custom_code: !ruby/object:Provider::Terraform::CustomCode constants: 'templates/terraform/constants/cloud_tasks_retry_config_custom_diff.go' -docs: !ruby/object:Provider::Terraform::Docs - warning: | - This resource requires an App Engine application to be created on the - project you're provisioning it on. If you haven't already enabled it, you - can create a `google_app_engine_application` resource to do so. This - resource's location will be the same as the App Engine location specified. examples: - !ruby/object:Provider::Terraform::Examples name: 'queue_basic' From 2717aaf1a1716a081efbd63fd6d46fb8260e97d1 Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Tue, 5 Dec 2023 02:22:24 +0000 Subject: [PATCH 10/44] bigquery connection - spark connection type (#7498) Co-authored-by: Scott Suarez --- .../bigqueryconnection/Connection.yaml | 43 +++++++++++++++++++ .../examples/bigquery_connection_spark.tf.erb | 33 ++++++++++++++ 2 files changed, 76 insertions(+) create mode 100644 mmv1/templates/terraform/examples/bigquery_connection_spark.tf.erb diff --git a/mmv1/products/bigqueryconnection/Connection.yaml b/mmv1/products/bigqueryconnection/Connection.yaml index 253e6299b96e..5b5109303328 100644 --- a/mmv1/products/bigqueryconnection/Connection.yaml +++ b/mmv1/products/bigqueryconnection/Connection.yaml @@ -119,6 +119,12 @@ examples: vars: connection_id: 'my-connection' database: 'projects/project/instances/instance/databases/database' + - !ruby/object:Provider::Terraform::Examples + name: "bigquery_connection_spark" + region_override: "US" + primary_resource_id: "connection" + vars: + connection_id: "my-connection" properties: - !ruby/object:Api::Type::String name: name @@ -167,6 +173,7 @@ properties: - azure - 
cloud_spanner - cloud_resource + - spark properties: - !ruby/object:Api::Type::String name: 'instanceId' @@ -216,6 +223,7 @@ properties: - azure - cloud_spanner - cloud_resource + - spark update_mask_fields: - 'aws.access_role.iam_role_id' properties: @@ -248,6 +256,7 @@ properties: - azure - cloud_spanner - cloud_resource + - spark update_mask_fields: - 'azure.customer_tenant_id' - 'azure.federated_application_client_id' @@ -295,6 +304,7 @@ properties: - azure - cloud_spanner - cloud_resource + - spark properties: - !ruby/object:Api::Type::String name: 'database' @@ -352,6 +362,7 @@ properties: - azure - cloud_spanner - cloud_resource + - spark send_empty_value: true properties: - !ruby/object:Api::Type::String @@ -360,3 +371,35 @@ properties: The account ID of the service created for the purpose of this connection. output: true + - !ruby/object:Api::Type::NestedObject + name: spark + description: Container for connection properties to execute stored procedures for Apache Spark. + resources. + exactly_one_of: + - cloud_sql + - aws + - azure + - cloud_spanner + - cloud_resource + - spark + send_empty_value: true + properties: + - !ruby/object:Api::Type::String + name: 'serviceAccountId' + description: The account ID of the service created for the purpose of this + connection. + output: true + - !ruby/object:Api::Type::NestedObject + name: metastoreServiceConfig + description: Dataproc Metastore Service configuration for the connection. + properties: + - !ruby/object:Api::Type::String + name: metastoreService + description: Resource name of an existing Dataproc Metastore service in the form of projects/[projectId]/locations/[region]/services/[serviceId]. + - !ruby/object:Api::Type::NestedObject + name: sparkHistoryServerConfig + description: Spark History Server configuration for the connection. 
+ properties: + - !ruby/object:Api::Type::String + name: dataprocCluster + description: Resource name of an existing Dataproc Cluster to act as a Spark History Server for the connection if the form of projects/[projectId]/regions/[region]/clusters/[cluster_name]. diff --git a/mmv1/templates/terraform/examples/bigquery_connection_spark.tf.erb b/mmv1/templates/terraform/examples/bigquery_connection_spark.tf.erb new file mode 100644 index 000000000000..18fcff706402 --- /dev/null +++ b/mmv1/templates/terraform/examples/bigquery_connection_spark.tf.erb @@ -0,0 +1,33 @@ +resource "google_bigquery_connection" "<%= ctx[:primary_resource_id] %>" { + connection_id = "<%= ctx[:vars]['connection_id'] %>" + location = "US" + friendly_name = "👋" + description = "a riveting description" + spark { + spark_history_server_config { + dataproc_cluster = google_dataproc_cluster.basic.id + } + } +} + +resource "google_dataproc_cluster" "basic" { + name = "<%= ctx[:vars]['connection_id'] %>" + region = "us-central1" + + cluster_config { + # Keep the costs down with smallest config we can get away with + software_config { + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" + } + } + + master_config { + num_instances = 1 + machine_type = "e2-standard-2" + disk_config { + boot_disk_size_gb = 35 + } + } + } + } From 805f9afb5e5a4857aeac3d44c855818382b3dc21 Mon Sep 17 00:00:00 2001 From: Thomas Rodgers Date: Tue, 5 Dec 2023 11:16:08 -0800 Subject: [PATCH 11/44] Recursively look for function calls in missing test detector (#9580) --- tools/missing-test-detector/reader.go | 76 +++++++++---------- tools/missing-test-detector/reader_test.go | 44 +++++++++-- .../testdata/service/function_call_test.go | 34 +++++++++ 3 files changed, 107 insertions(+), 47 deletions(-) create mode 100644 tools/missing-test-detector/testdata/service/function_call_test.go diff --git a/tools/missing-test-detector/reader.go b/tools/missing-test-detector/reader.go index 3ac95d54092d..651bb1d6875d 
100644 --- a/tools/missing-test-detector/reader.go +++ b/tools/missing-test-detector/reader.go @@ -216,25 +216,23 @@ func readStepsCompLit(stepsCompLit *ast.CompositeLit, funcDecls map[string]*ast. for _, eltCompLitElt := range eltCompLit.Elts { if keyValueExpr, ok := eltCompLitElt.(*ast.KeyValueExpr); ok { if ident, ok := keyValueExpr.Key.(*ast.Ident); ok && ident.Name == "Config" { + var configStr string + var err error if configCallExpr, ok := keyValueExpr.Value.(*ast.CallExpr); ok { - step, err := readConfigCallExpr(configCallExpr, funcDecls, varDecls) - if err != nil { - errs = append(errs, err) - } - test.Steps = append(test.Steps, step) + configStr, err = readConfigCallExpr(configCallExpr, funcDecls, varDecls) } else if ident, ok := keyValueExpr.Value.(*ast.Ident); ok { if configVar, ok := varDecls[ident.Name]; ok { - configStr, err := strconv.Unquote(configVar.Value) - if err != nil { - errs = append(errs, err) - } - step, err := readConfigStr(configStr) - if err != nil { - errs = append(errs, err) - } - test.Steps = append(test.Steps, step) + configStr, err = strconv.Unquote(configVar.Value) } } + if err != nil { + errs = append(errs, err) + } + step, err := readConfigStr(configStr) + if err != nil { + errs = append(errs, err) + } + test.Steps = append(test.Steps, step) } } } @@ -247,46 +245,40 @@ func readStepsCompLit(stepsCompLit *ast.CompositeLit, funcDecls map[string]*ast. } // Read the call expression in the public test function that returns the config. 
-func readConfigCallExpr(configCallExpr *ast.CallExpr, funcDecls map[string]*ast.FuncDecl, varDecls map[string]*ast.BasicLit) (Step, error) { +func readConfigCallExpr(configCallExpr *ast.CallExpr, funcDecls map[string]*ast.FuncDecl, varDecls map[string]*ast.BasicLit) (string, error) { if ident, ok := configCallExpr.Fun.(*ast.Ident); ok { if configFunc, ok := funcDecls[ident.Name]; ok { - return readConfigFunc(configFunc) + return readConfigFunc(configFunc, funcDecls, varDecls) } - return nil, fmt.Errorf("failed to find function declaration %s", ident.Name) + return "", fmt.Errorf("failed to find function declaration %s", ident.Name) } - return nil, fmt.Errorf("failed to get ident for %v", configCallExpr.Fun) + return "", fmt.Errorf("failed to get ident for %v", configCallExpr.Fun) } -func readConfigFunc(configFunc *ast.FuncDecl) (Step, error) { +func readConfigFunc(configFunc *ast.FuncDecl, funcDecls map[string]*ast.FuncDecl, varDecls map[string]*ast.BasicLit) (string, error) { for _, stmt := range configFunc.Body.List { if returnStmt, ok := stmt.(*ast.ReturnStmt); ok { - for _, result := range returnStmt.Results { - configStr, err := readConfigFuncResult(result) - if err != nil { - return nil, err - } - if configStr != "" { - return readConfigStr(configStr) - } + if len(returnStmt.Results) > 0 { + return readConfigFuncResult(returnStmt.Results[0], funcDecls, varDecls) } - return nil, fmt.Errorf("failed to find a config string in results %v", returnStmt.Results) + return "", fmt.Errorf("failed to find a config string in results %v", returnStmt.Results) } } - return nil, fmt.Errorf("failed to find a return statement in %v", configFunc.Body.List) + return "", fmt.Errorf("failed to find a return statement in %v", configFunc.Body.List) } // Read the return result of a config func and return the config string. 
-func readConfigFuncResult(result ast.Expr) (string, error) { +func readConfigFuncResult(result ast.Expr, funcDecls map[string]*ast.FuncDecl, varDecls map[string]*ast.BasicLit) (string, error) { if basicLit, ok := result.(*ast.BasicLit); ok && basicLit.Kind == token.STRING { return strconv.Unquote(basicLit.Value) } else if callExpr, ok := result.(*ast.CallExpr); ok { - return readConfigFuncCallExpr(callExpr) + return readConfigFuncCallExpr(callExpr, funcDecls, varDecls) } else if binaryExpr, ok := result.(*ast.BinaryExpr); ok { - xConfigStr, err := readConfigFuncResult(binaryExpr.X) + xConfigStr, err := readConfigFuncResult(binaryExpr.X, funcDecls, varDecls) if err != nil { return "", err } - yConfigStr, err := readConfigFuncResult(binaryExpr.Y) + yConfigStr, err := readConfigFuncResult(binaryExpr.Y, funcDecls, varDecls) if err != nil { return "", err } @@ -298,16 +290,16 @@ func readConfigFuncResult(result ast.Expr) (string, error) { // Read the call expression in the config function that returns the config string. // The call expression can contain a nested call expression. // Return the config string. 
-func readConfigFuncCallExpr(configFuncCallExpr *ast.CallExpr) (string, error) { - if len(configFuncCallExpr.Args) == 0 { - return "", fmt.Errorf("no arguments found for call expression %v", configFuncCallExpr) - } - if basicLit, ok := configFuncCallExpr.Args[0].(*ast.BasicLit); ok && basicLit.Kind == token.STRING { - return strconv.Unquote(basicLit.Value) - } else if nestedCallExpr, ok := configFuncCallExpr.Args[0].(*ast.CallExpr); ok { - return readConfigFuncCallExpr(nestedCallExpr) +func readConfigFuncCallExpr(configFuncCallExpr *ast.CallExpr, funcDecls map[string]*ast.FuncDecl, varDecls map[string]*ast.BasicLit) (string, error) { + if len(configFuncCallExpr.Args) > 0 { + if basicLit, ok := configFuncCallExpr.Args[0].(*ast.BasicLit); ok && basicLit.Kind == token.STRING { + return strconv.Unquote(basicLit.Value) + } else if nestedCallExpr, ok := configFuncCallExpr.Args[0].(*ast.CallExpr); ok { + return readConfigFuncCallExpr(nestedCallExpr, funcDecls, varDecls) + } } - return "", fmt.Errorf("no string literal found in arguments to call expression %v", configFuncCallExpr) + // Config string not readable from args, attempt to read call expression as a helper function. + return readConfigCallExpr(configFuncCallExpr, funcDecls, varDecls) } // Read the config string and return a test step. 
diff --git a/tools/missing-test-detector/reader_test.go b/tools/missing-test-detector/reader_test.go index e42d3eff4137..cb70579f6b46 100644 --- a/tools/missing-test-detector/reader_test.go +++ b/tools/missing-test-detector/reader_test.go @@ -144,10 +144,8 @@ func TestReadCrossFileTests(t *testing.T) { if err != nil { t.Fatalf("error reading cross file tests: %v", err) } - if len(tests) != 2 { - t.Fatalf("unexpected number of tests: %d, expected 2", len(tests)) - } - if expectedTests := []*Test{ + + expectedTests := []*Test{ { Name: "testAccCrossFile1", Steps: []Step{ @@ -172,8 +170,44 @@ func TestReadCrossFileTests(t *testing.T) { }, }, }, - }; !reflect.DeepEqual(tests, expectedTests) { + } + + if len(tests) != len(expectedTests) { + t.Fatalf("unexpected number of tests: %d, expected %d", len(tests), len(expectedTests)) + } + + if !reflect.DeepEqual(tests, expectedTests) { t.Errorf("found unexpected cross file tests: %v, expected %v", tests, expectedTests) } } + +func TestReadHelperFunctionCall(t *testing.T) { + tests, err := readTestFiles([]string{"testdata/service/function_call_test.go"}) + if err != nil { + t.Fatalf("error reading function call test: %v", err) + } + if len(tests) != 1 { + t.Fatalf("unexpected number of tests: %d, expected 1", len(tests)) + } + expectedTest := &Test{ + Name: "TestAccFunctionCallResource", + Steps: []Step{ + Step{ + "helped_resource": Resources{ + "primary": Resource{ + "field_one": "\"value-one\"", + }, + }, + "helper_resource": Resources{ + "default": Resource{ + "field_one": "\"value-one\"", + }, + }, + }, + }, + } + if !reflect.DeepEqual(tests[0], expectedTest) { + t.Errorf("found unexpected tests using helper function: %v, expected %v", tests[0], expectedTest) + } +} diff --git a/tools/missing-test-detector/testdata/service/function_call_test.go b/tools/missing-test-detector/testdata/service/function_call_test.go new file mode 100644 index 000000000000..a430e8a796a9 --- /dev/null +++ 
b/tools/missing-test-detector/testdata/service/function_call_test.go @@ -0,0 +1,34 @@ +package service_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/acctest" +) + +func TestAccFunctionCallResource(t *testing.T) { + acctest.VcrTest(t, resource.TestCase{ + Steps: []resource.TestStep{ + { + Config: testAccFunctionCallResource(), + }, + }, + }) +} + +func helperFunction() string { + return ` +resource "helper_resource" "default" { + field_one = "value-one" +} +` +} + +func testAccFunctionCallResource() string { + return helperFunction() + acctest.Nprintf(` +resource "helped_resource" "primary" { + field_one = "value-one" +} +`) +} From 05c4410c0e599f33ab255e3820187855c82c7739 Mon Sep 17 00:00:00 2001 From: pratikgarg10 <33780624+pratikgarg10@users.noreply.github.com> Date: Tue, 5 Dec 2023 11:50:19 -0800 Subject: [PATCH 12/44] add support for IAM Group authentication to google_sql_user (#9578) --- .../services/sql/resource_sql_user.go | 6 ++++-- .../services/sql/resource_sql_user_test.go | 11 ++++++++++ .../website/docs/r/sql_user.html.markdown | 21 ++++++++++++++++++- 3 files changed, 35 insertions(+), 3 deletions(-) diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_user.go b/mmv1/third_party/terraform/services/sql/resource_sql_user.go index 599e55df39f7..15bf8061b05d 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_user.go +++ b/mmv1/third_party/terraform/services/sql/resource_sql_user.go @@ -102,8 +102,10 @@ func ResourceSqlUser() *schema.Resource { ForceNew: true, DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("BUILT_IN"), Description: `The user type. It determines the method to authenticate the user during login. - The default is the database's built-in user type. 
Flags include "BUILT_IN", "CLOUD_IAM_USER", or "CLOUD_IAM_SERVICE_ACCOUNT".`, - ValidateFunc: validation.StringInSlice([]string{"BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", ""}, false), + The default is the database's built-in user type. Flags include "BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", + "CLOUD_IAM_GROUP", "CLOUD_IAM_GROUP_USER" or "CLOUD_IAM_GROUP_SERVICE_ACCOUNT".`, + ValidateFunc: validation.StringInSlice([]string{"BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", + "CLOUD_IAM_GROUP", "CLOUD_IAM_GROUP_USER", "CLOUD_IAM_GROUP_SERVICE_ACCOUNT", ""}, false), }, "sql_server_user_details": { Type: schema.TypeList, diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_user_test.go b/mmv1/third_party/terraform/services/sql/resource_sql_user_test.go index efcf0d5f5962..9091ef56a534 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_user_test.go +++ b/mmv1/third_party/terraform/services/sql/resource_sql_user_test.go @@ -26,6 +26,7 @@ func TestAccSqlUser_mysql(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckGoogleSqlUserExists(t, "google_sql_user.user1"), testAccCheckGoogleSqlUserExists(t, "google_sql_user.user2"), + testAccCheckGoogleSqlUserExists(t, "google_sql_user.user3"), ), }, { @@ -34,6 +35,7 @@ func TestAccSqlUser_mysql(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckGoogleSqlUserExists(t, "google_sql_user.user1"), testAccCheckGoogleSqlUserExists(t, "google_sql_user.user2"), + testAccCheckGoogleSqlUserExists(t, "google_sql_user.user3"), ), }, { @@ -311,6 +313,15 @@ resource "google_sql_user" "user2" { instance = google_sql_database_instance.instance.name host = "gmail.com" password = "hunter2" + type = "CLOUD_IAM_USER" +} + +resource "google_sql_user" "user3" { + name = "admin" + instance = google_sql_database_instance.instance.name + host = "gmail.com" + password = "hunter3" + type = "CLOUD_IAM_GROUP" } `, instance, password) } diff --git 
a/mmv1/third_party/terraform/website/docs/r/sql_user.html.markdown b/mmv1/third_party/terraform/website/docs/r/sql_user.html.markdown index 9b96d4463a22..0072e8aa762b 100644 --- a/mmv1/third_party/terraform/website/docs/r/sql_user.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/sql_user.html.markdown @@ -72,6 +72,24 @@ resource "google_sql_user" "iam_service_account_user" { instance = google_sql_database_instance.main.name type = "CLOUD_IAM_SERVICE_ACCOUNT" } + +resource "google_sql_user" "iam_group" { + name = "group1@example.com" + instance = google_sql_database_instance.main.name + type = "CLOUD_IAM_GROUP" +} + +resource "google_sql_user" "iam_group_user" { + name = "group_user1@example.com" + instance = google_sql_database_instance.main.name + type = "CLOUD_IAM_GROUP_USER" +} + +resource "google_sql_user" "iam_group_service_account_user" { + name = "my-service-account@example.iam.gserviceaccount.com" + instance = google_sql_database_instance.main.name + type = "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" +} ``` ## Argument Reference @@ -91,7 +109,8 @@ The following arguments are supported: * `type` - (Optional) The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type. Flags - include "BUILT_IN", "CLOUD_IAM_USER", or "CLOUD_IAM_SERVICE_ACCOUNT". + include "BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", + "CLOUD_IAM_GROUP", "CLOUD_IAM_GROUP_USER" or "CLOUD_IAM_GROUP_SERVICE_ACCOUNT". * `deletion_policy` - (Optional) The deletion policy for the user. Setting `ABANDON` allows the resource to be abandoned rather than deleted. 
This is useful From 99b5c55dd2b3f66b91a192e9a22a7ffe51a387b1 Mon Sep 17 00:00:00 2001 From: Gorlami96 <30335782+Gorlami96@users.noreply.github.com> Date: Wed, 6 Dec 2023 02:17:46 +0530 Subject: [PATCH 13/44] migrating router-bgp-peer to be a handwritten resource (#9559) * Making router-bgp-peer as a handwritten resource * Adding sweeper file, removing example suffix from test files and adding resource in the handwritten section in provider * Removing unused import statements * Removing unused import statements from router_peer * Fix indentation --------- Co-authored-by: Shivang Dixit Co-authored-by: Thomas Rodgers --- mmv1/products/compute/RouterBgpPeer.yaml | 315 ---- .../provider/provider_mmv1_resources.go.erb | 3 +- .../resource_compute_router_peer.go.erb | 1318 +++++++++++++++++ .../resource_compute_router_peer_sweeper.go | 122 ++ .../resource_compute_router_peer_test.go.erb | 188 +++ .../docs/r/compute_router_peer.html.markdown | 410 +++++ 6 files changed, 2040 insertions(+), 316 deletions(-) delete mode 100644 mmv1/products/compute/RouterBgpPeer.yaml create mode 100644 mmv1/third_party/terraform/services/compute/resource_compute_router_peer.go.erb create mode 100644 mmv1/third_party/terraform/services/compute/resource_compute_router_peer_sweeper.go create mode 100644 mmv1/third_party/terraform/services/compute/resource_compute_router_peer_test.go.erb create mode 100644 mmv1/third_party/terraform/website/docs/r/compute_router_peer.html.markdown diff --git a/mmv1/products/compute/RouterBgpPeer.yaml b/mmv1/products/compute/RouterBgpPeer.yaml deleted file mode 100644 index 7b83bb74dcba..000000000000 --- a/mmv1/products/compute/RouterBgpPeer.yaml +++ /dev/null @@ -1,315 +0,0 @@ -# Copyright 2023 Google Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - ---- !ruby/object:Api::Resource -name: 'RouterBgpPeer' -base_url: projects/{{project}}/regions/{{region}}/routers/{{router}} -self_link: projects/{{project}}/regions/{{region}}/routers/{{router}} -create_verb: :PATCH -update_verb: :PATCH -delete_verb: :PATCH -identity: - - name -nested_query: !ruby/object:Api::Resource::NestedQuery - modify_by_patch: true - keys: - - bgpPeers -description: | - BGP information that must be configured into the routing stack to - establish BGP peering. This information must specify the peer ASN - and either the interface name, IP address, or peer IP address. - Please refer to RFC4273. 
-references: !ruby/object:Api::Resource::ReferenceLinks - guides: - 'Google Cloud Router': 'https://cloud.google.com/router/docs/' - api: 'https://cloud.google.com/compute/docs/reference/rest/v1/routers' -async: !ruby/object:Api::OpAsync - operation: !ruby/object:Api::OpAsync::Operation - kind: 'compute#operation' - path: 'name' - base_url: 'projects/{{project}}/regions/{{regions}}/operations/{{op_id}}' - wait_ms: 1000 - result: !ruby/object:Api::OpAsync::Result - path: 'targetLink' - status: !ruby/object:Api::OpAsync::Status - path: 'status' - complete: 'DONE' - allowed: - - 'PENDING' - - 'RUNNING' - - 'DONE' - error: !ruby/object:Api::OpAsync::Error - path: 'error/errors' - message: 'message' -legacy_name: 'google_compute_router_peer' -exclude_tgc: true -id_format: 'projects/{{project}}/regions/{{region}}/routers/{{router}}/{{name}}' -mutex: router/{{region}}/{{router}} -custom_code: !ruby/object:Provider::Terraform::CustomCode - constants: templates/terraform/constants/router_bgp_peer.erb -examples: - # These examples are not used to autogenerate tests, as fine-grained - # resources do not fit the normal test flow - we need to test deletion - # in a test step while parent resource still exists vs in CheckDestroy - # when all resources have been deleted. 
- - !ruby/object:Provider::Terraform::Examples - name: 'router_peer_basic' - primary_resource_id: 'peer' - skip_test: true - vars: - router_name: 'my-router' - peer_name: 'my-router-peer' - - !ruby/object:Provider::Terraform::Examples - name: 'router_peer_disabled' - primary_resource_id: 'peer' - skip_test: true - vars: - router_name: 'my-router' - peer_name: 'my-router-peer' - - !ruby/object:Provider::Terraform::Examples - name: 'router_peer_bfd' - primary_resource_id: 'peer' - skip_test: true - vars: - router_name: 'my-router' - peer_name: 'my-router-peer' - - !ruby/object:Provider::Terraform::Examples - name: 'router_peer_router_appliance' - primary_resource_id: 'peer' - vars: - router_name: 'my-router' - peer_name: 'my-router-peer' -parameters: - - !ruby/object:Api::Type::ResourceRef - name: 'router' - resource: 'Router' - imports: 'name' - description: | - The name of the Cloud Router in which this BgpPeer will be configured. - required: true - immutable: true - url_param_only: true - - !ruby/object:Api::Type::ResourceRef - name: region - resource: Region - imports: name - description: | - Region where the router and BgpPeer reside. - If it is not provided, the provider region is used. - immutable: true - required: false - url_param_only: true - default_from_api: true - custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.erb' -properties: - - !ruby/object:Api::Type::String - name: 'name' - description: | - Name of this BGP peer. The name must be 1-63 characters long, - and comply with RFC1035. Specifically, the name must be 1-63 characters - long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which - means the first character must be a lowercase letter, and all - following characters must be a dash, lowercase letter, or digit, - except the last character, which cannot be a dash. 
- required: true - immutable: true - validation: !ruby/object:Provider::Terraform::Validation - function: 'verify.ValidateRFC1035Name(2, 63)' - - !ruby/object:Api::Type::String - name: 'interface' - api_name: 'interfaceName' - description: | - Name of the interface the BGP peer is associated with. - required: true - immutable: true - - !ruby/object:Api::Type::String - name: 'ipAddress' - description: | - IP address of the interface inside Google Cloud Platform. - Only IPv4 is supported. - default_from_api: true - - !ruby/object:Api::Type::String - name: 'peerIpAddress' - description: | - IP address of the BGP interface outside Google Cloud Platform. - Only IPv4 is supported. Required if `ip_address` is set. - default_from_api: true - - !ruby/object:Api::Type::Integer - name: 'peerAsn' - description: | - Peer BGP Autonomous System Number (ASN). - Each BGP interface may use a different value. - required: true - - !ruby/object:Api::Type::Integer - name: 'advertisedRoutePriority' - description: | - The priority of routes advertised to this BGP peer. - Where there is more than one matching route of maximum - length, the routes with the lowest priority value win. - send_empty_value: true - - !ruby/object:Api::Type::Enum - name: advertiseMode - description: | - User-specified flag to indicate which mode to use for advertisement. - Valid values of this enum field are: `DEFAULT`, `CUSTOM` - values: - - :DEFAULT - - :CUSTOM - default_value: :DEFAULT - custom_flatten: 'templates/terraform/custom_flatten/default_if_empty.erb' - - !ruby/object:Api::Type::Array - name: advertisedGroups - description: | - User-specified list of prefix groups to advertise in custom - mode, which currently supports the following option: - - * `ALL_SUBNETS`: Advertises all of the router's own VPC subnets. - This excludes any routes learned for subnets that use VPC Network - Peering. 
- - - Note that this field can only be populated if advertiseMode is `CUSTOM` - and overrides the list defined for the router (in the "bgp" message). - These groups are advertised in addition to any specified prefixes. - Leave this field blank to advertise no custom groups. - send_empty_value: true - item_type: Api::Type::String - - !ruby/object:Api::Type::Array - name: advertisedIpRanges - description: | - User-specified list of individual IP ranges to advertise in - custom mode. This field can only be populated if advertiseMode - is `CUSTOM` and is advertised to all peers of the router. These IP - ranges will be advertised in addition to any specified groups. - Leave this field blank to advertise no custom IP ranges. - send_empty_value: true - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::String - name: range - required: true - description: | - The IP range to advertise. The value must be a - CIDR-formatted string. - - !ruby/object:Api::Type::String - name: description - description: | - User-specified description for the IP range. - send_empty_value: true - - !ruby/object:Api::Type::String - name: 'managementType' - description: | - The resource that configures and manages this BGP peer. - - * `MANAGED_BY_USER` is the default value and can be managed by - you or other users - * `MANAGED_BY_ATTACHMENT` is a BGP peer that is configured and - managed by Cloud Interconnect, specifically by an - InterconnectAttachment of type PARTNER. Google automatically - creates, updates, and deletes this type of BGP peer when the - PARTNER InterconnectAttachment is created, updated, - or deleted. - output: true - - !ruby/object:Api::Type::NestedObject - name: bfd - description: | - BFD configuration for the BGP peering. - default_from_api: true - properties: - - !ruby/object:Api::Type::Enum - name: sessionInitializationMode - description: | - The BFD session initialization mode for this BGP peer. 
- If set to `ACTIVE`, the Cloud Router will initiate the BFD session - for this BGP peer. If set to `PASSIVE`, the Cloud Router will wait - for the peer router to initiate the BFD session for this BGP peer. - If set to `DISABLED`, BFD is disabled for this BGP peer. - values: - - :ACTIVE - - :DISABLED - - :PASSIVE - required: true - - !ruby/object:Api::Type::Integer - name: minTransmitInterval - description: | - The minimum interval, in milliseconds, between BFD control packets - transmitted to the peer router. The actual value is negotiated - between the two routers and is equal to the greater of this value - and the corresponding receive interval of the other router. If set, - this value must be between 1000 and 30000. - default_value: 1000 - - !ruby/object:Api::Type::Integer - name: minReceiveInterval - description: | - The minimum interval, in milliseconds, between BFD control packets - received from the peer router. The actual value is negotiated - between the two routers and is equal to the greater of this value - and the transmit interval of the other router. If set, this value - must be between 1000 and 30000. - default_value: 1000 - - !ruby/object:Api::Type::Integer - name: multiplier - description: | - The number of consecutive BFD packets that must be missed before - BFD declares that a peer is unavailable. If set, the value must - be a value between 5 and 16. - default_value: 5 - - !ruby/object:Api::Type::Boolean - name: 'enable' - description: | - The status of the BGP peer connection. If set to false, any active session - with the peer is terminated and all associated routing information is removed. - If set to true, the peer connection can be established with routing information. - The default is true. 
- default_value: true - custom_expand: 'templates/terraform/custom_expand/bool_to_upper_string.erb' - custom_flatten: 'templates/terraform/custom_flatten/string_to_bool_default_true.erb' - send_empty_value: true - - !ruby/object:Api::Type::ResourceRef - name: 'routerApplianceInstance' - resource: 'Instance' - imports: 'selfLink' - description: | - The URI of the VM instance that is used as third-party router appliances - such as Next Gen Firewalls, Virtual Routers, or Router Appliances. - The VM instance must be located in zones contained in the same region as - this Cloud Router. The VM instance is the peer side of the BGP session. - custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' - - !ruby/object:Api::Type::Boolean - name: 'enableIpv6' - description: | - Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default. - default_value: false - send_empty_value: true - - !ruby/object:Api::Type::String - name: 'ipv6NexthopAddress' - description: | - IPv6 address of the interface inside Google Cloud Platform. - The address must be in the range 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64. - If you do not specify the next hop addresses, Google Cloud automatically - assigns unused addresses from the 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64 range for you. - default_from_api: true - validation: !ruby/object:Provider::Terraform::Validation - function: 'verify.ValidateIpAddress' - diff_suppress_func: ipv6RepresentationDiffSuppress - - !ruby/object:Api::Type::String - name: 'peerIpv6NexthopAddress' - description: | - IPv6 address of the BGP interface outside Google Cloud Platform. - The address must be in the range 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64. - If you do not specify the next hop addresses, Google Cloud automatically - assigns unused addresses from the 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64 range for you. 
- default_from_api: true - validation: !ruby/object:Provider::Terraform::Validation - function: 'verify.ValidateIpAddress' - diff_suppress_func: ipv6RepresentationDiffSuppress diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index f05ea5d6faba..8f41adb2b48e 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -331,6 +331,7 @@ var handwrittenResources = map[string]*schema.Resource{ "google_compute_attached_disk": compute.ResourceComputeAttachedDisk(), "google_compute_instance": compute.ResourceComputeInstance(), "google_compute_disk_async_replication": compute.ResourceComputeDiskAsyncReplication(), + "google_compute_router_peer": compute.ResourceComputeRouterBgpPeer(), <% unless version == 'ga' -%> "google_compute_instance_from_machine_image": compute.ResourceComputeInstanceFromMachineImage(), <% end -%> @@ -472,4 +473,4 @@ var handwrittenIAMResources = map[string]*schema.Resource{ "google_service_account_iam_member": tpgiamresource.ResourceIamMember(resourcemanager.IamServiceAccountSchema, resourcemanager.NewServiceAccountIamUpdater, resourcemanager.ServiceAccountIdParseFunc), "google_service_account_iam_policy": tpgiamresource.ResourceIamPolicy(resourcemanager.IamServiceAccountSchema, resourcemanager.NewServiceAccountIamUpdater, resourcemanager.ServiceAccountIdParseFunc), // ####### END non-generated IAM resources ########### -} \ No newline at end of file +} diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_router_peer.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_router_peer.go.erb new file mode 100644 index 000000000000..4a5729ccc513 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/resource_compute_router_peer.go.erb @@ -0,0 +1,1318 @@ +<% autogen_exception -%> +package compute + +import ( + "fmt" + 
"log" + "net" + "reflect" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + +) + +func ipv6RepresentationDiffSuppress(_, old, new string, d *schema.ResourceData) bool { + //Diff suppress any equal IPV6 address in different representations + //An IPV6 address can have long or short representations + //E.g 2001:0cb0:0000:0000:0fc0:0000:0000:0abc, after compression: + //A) 2001:0cb0::0fc0:0000:0000:0abc (Omit groups of all zeros) + //B) 2001:cb0:0:0:fc0::abc (Omit leading zeros) + //C) 2001:cb0::fc0:0:0:abc (Combining A and B) + //The GCP API follows rule B) for normalzation + + oldIp := net.ParseIP(old) + newIp := net.ParseIP(new) + return oldIp.Equal(newIp) +} + +func ResourceComputeRouterBgpPeer() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouterBgpPeerCreate, + Read: resourceComputeRouterBgpPeerRead, + Update: resourceComputeRouterBgpPeerUpdate, + Delete: resourceComputeRouterBgpPeerDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRouterBgpPeerImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "interface": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the interface the BGP peer is associated with.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRFC1035Name(2, 63), + Description: `Name of this BGP peer. 
The name must be 1-63 characters long, +and comply with RFC1035. Specifically, the name must be 1-63 characters +long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which +means the first character must be a lowercase letter, and all +following characters must be a dash, lowercase letter, or digit, +except the last character, which cannot be a dash.`, + }, + "peer_asn": { + Type: schema.TypeInt, + Required: true, + Description: `Peer BGP Autonomous System Number (ASN). +Each BGP interface may use a different value.`, + }, + "router": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Cloud Router in which this BgpPeer will be configured.`, + }, + "advertise_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"DEFAULT", "CUSTOM", ""}), + Description: `User-specified flag to indicate which mode to use for advertisement. +Valid values of this enum field are: 'DEFAULT', 'CUSTOM' Default value: "DEFAULT" Possible values: ["DEFAULT", "CUSTOM"]`, + Default: "DEFAULT", + }, + "advertised_groups": { + Type: schema.TypeList, + Optional: true, + Description: `User-specified list of prefix groups to advertise in custom +mode, which currently supports the following option: + +* 'ALL_SUBNETS': Advertises all of the router's own VPC subnets. +This excludes any routes learned for subnets that use VPC Network +Peering. + + +Note that this field can only be populated if advertiseMode is 'CUSTOM' +and overrides the list defined for the router (in the "bgp" message). +These groups are advertised in addition to any specified prefixes. +Leave this field blank to advertise no custom groups.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "advertised_ip_ranges": { + Type: schema.TypeList, + Optional: true, + Description: `User-specified list of individual IP ranges to advertise in +custom mode. 
This field can only be populated if advertiseMode +is 'CUSTOM' and is advertised to all peers of the router. These IP +ranges will be advertised in addition to any specified groups. +Leave this field blank to advertise no custom IP ranges.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "range": { + Type: schema.TypeString, + Required: true, + Description: `The IP range to advertise. The value must be a +CIDR-formatted string.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `User-specified description for the IP range.`, + }, + }, + }, + }, + "advertised_route_priority": { + Type: schema.TypeInt, + Optional: true, + Description: `The priority of routes advertised to this BGP peer. +Where there is more than one matching route of maximum +length, the routes with the lowest priority value win.`, + }, + "bfd": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `BFD configuration for the BGP peering.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "session_initialization_mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"ACTIVE", "DISABLED", "PASSIVE"}), + Description: `The BFD session initialization mode for this BGP peer. +If set to 'ACTIVE', the Cloud Router will initiate the BFD session +for this BGP peer. If set to 'PASSIVE', the Cloud Router will wait +for the peer router to initiate the BFD session for this BGP peer. +If set to 'DISABLED', BFD is disabled for this BGP peer. Possible values: ["ACTIVE", "DISABLED", "PASSIVE"]`, + }, + "min_receive_interval": { + Type: schema.TypeInt, + Optional: true, + Description: `The minimum interval, in milliseconds, between BFD control packets +received from the peer router. The actual value is negotiated +between the two routers and is equal to the greater of this value +and the transmit interval of the other router. 
If set, this value +must be between 1000 and 30000.`, + Default: 1000, + }, + "min_transmit_interval": { + Type: schema.TypeInt, + Optional: true, + Description: `The minimum interval, in milliseconds, between BFD control packets +transmitted to the peer router. The actual value is negotiated +between the two routers and is equal to the greater of this value +and the corresponding receive interval of the other router. If set, +this value must be between 1000 and 30000.`, + Default: 1000, + }, + "multiplier": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of consecutive BFD packets that must be missed before +BFD declares that a peer is unavailable. If set, the value must +be a value between 5 and 16.`, + Default: 5, + }, + }, + }, + }, + "enable": { + Type: schema.TypeBool, + Optional: true, + Description: `The status of the BGP peer connection. If set to false, any active session +with the peer is terminated and all associated routing information is removed. +If set to true, the peer connection can be established with routing information. +The default is true.`, + Default: true, + }, + "enable_ipv6": { + Type: schema.TypeBool, + Optional: true, + Description: `Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default.`, + Default: false, + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `IP address of the interface inside Google Cloud Platform. +Only IPv4 is supported.`, + }, + "ipv6_nexthop_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateIpAddress, + DiffSuppressFunc: ipv6RepresentationDiffSuppress, + Description: `IPv6 address of the interface inside Google Cloud Platform. +The address must be in the range 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64. 
+If you do not specify the next hop addresses, Google Cloud automatically +assigns unused addresses from the 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64 range for you.`, + }, + "peer_ip_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `IP address of the BGP interface outside Google Cloud Platform. +Only IPv4 is supported. Required if 'ip_address' is set.`, + }, + "peer_ipv6_nexthop_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateIpAddress, + DiffSuppressFunc: ipv6RepresentationDiffSuppress, + Description: `IPv6 address of the BGP interface outside Google Cloud Platform. +The address must be in the range 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64. +If you do not specify the next hop addresses, Google Cloud automatically +assigns unused addresses from the 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64 range for you.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Region where the router and BgpPeer reside. +If it is not provided, the provider region is used.`, + }, + "router_appliance_instance": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URI of the VM instance that is used as third-party router appliances +such as Next Gen Firewalls, Virtual Routers, or Router Appliances. +The VM instance must be located in zones contained in the same region as +this Cloud Router. The VM instance is the peer side of the BGP session.`, + }, + "management_type": { + Type: schema.TypeString, + Computed: true, + Description: `The resource that configures and manages this BGP peer. 
+ +* 'MANAGED_BY_USER' is the default value and can be managed by +you or other users +* 'MANAGED_BY_ATTACHMENT' is a BGP peer that is configured and +managed by Cloud Interconnect, specifically by an +InterconnectAttachment of type PARTNER. Google automatically +creates, updates, and deletes this type of BGP peer when the +PARTNER InterconnectAttachment is created, updated, +or deleted.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRouterBgpPeerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandNestedComputeRouterBgpPeerName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + interfaceNameProp, err := expandNestedComputeRouterBgpPeerInterface(d.Get("interface"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("interface"); !tpgresource.IsEmptyValue(reflect.ValueOf(interfaceNameProp)) && (ok || !reflect.DeepEqual(v, interfaceNameProp)) { + obj["interfaceName"] = interfaceNameProp + } + ipAddressProp, err := expandNestedComputeRouterBgpPeerIpAddress(d.Get("ip_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipAddressProp)) && (ok || !reflect.DeepEqual(v, ipAddressProp)) { + obj["ipAddress"] = ipAddressProp + } + peerIpAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpAddress(d.Get("peer_ip_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ip_address"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(peerIpAddressProp)) && (ok || !reflect.DeepEqual(v, peerIpAddressProp)) { + obj["peerIpAddress"] = peerIpAddressProp + } + peerAsnProp, err := expandNestedComputeRouterBgpPeerPeerAsn(d.Get("peer_asn"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_asn"); !tpgresource.IsEmptyValue(reflect.ValueOf(peerAsnProp)) && (ok || !reflect.DeepEqual(v, peerAsnProp)) { + obj["peerAsn"] = peerAsnProp + } + advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { + obj["advertisedRoutePriority"] = advertisedRoutePriorityProp + } + advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertise_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(advertiseModeProp)) && (ok || !reflect.DeepEqual(v, advertiseModeProp)) { + obj["advertiseMode"] = advertiseModeProp + } + advertisedGroupsProp, err := expandNestedComputeRouterBgpPeerAdvertisedGroups(d.Get("advertised_groups"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_groups"); ok || !reflect.DeepEqual(v, advertisedGroupsProp) { + obj["advertisedGroups"] = advertisedGroupsProp + } + advertisedIpRangesProp, err := expandNestedComputeRouterBgpPeerAdvertisedIpRanges(d.Get("advertised_ip_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_ip_ranges"); ok || !reflect.DeepEqual(v, advertisedIpRangesProp) { + obj["advertisedIpRanges"] = advertisedIpRangesProp + } + bfdProp, err := expandNestedComputeRouterBgpPeerBfd(d.Get("bfd"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bfd"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(bfdProp)) && (ok || !reflect.DeepEqual(v, bfdProp)) { + obj["bfd"] = bfdProp + } + enableProp, err := expandNestedComputeRouterBgpPeerEnable(d.Get("enable"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable"); ok || !reflect.DeepEqual(v, enableProp) { + obj["enable"] = enableProp + } + routerApplianceInstanceProp, err := expandNestedComputeRouterBgpPeerRouterApplianceInstance(d.Get("router_appliance_instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("router_appliance_instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(routerApplianceInstanceProp)) && (ok || !reflect.DeepEqual(v, routerApplianceInstanceProp)) { + obj["routerApplianceInstance"] = routerApplianceInstanceProp + } + enableIpv6Prop, err := expandNestedComputeRouterBgpPeerEnableIpv6(d.Get("enable_ipv6"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_ipv6"); ok || !reflect.DeepEqual(v, enableIpv6Prop) { + obj["enableIpv6"] = enableIpv6Prop + } + ipv6NexthopAddressProp, err := expandNestedComputeRouterBgpPeerIpv6NexthopAddress(d.Get("ipv6_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ipv6_nexthop_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipv6NexthopAddressProp)) && (ok || !reflect.DeepEqual(v, ipv6NexthopAddressProp)) { + obj["ipv6NexthopAddress"] = ipv6NexthopAddressProp + } + peerIpv6NexthopAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(d.Get("peer_ipv6_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ipv6_nexthop_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(peerIpv6NexthopAddressProp)) && (ok || !reflect.DeepEqual(v, peerIpv6NexthopAddressProp)) { + obj["peerIpv6NexthopAddress"] = peerIpv6NexthopAddressProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{router}}") + if 
err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new RouterBgpPeer: %#v", obj) + + obj, err = resourceComputeRouterBgpPeerPatchCreateEncoder(d, meta, obj) + if err != nil { + return err + } + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating RouterBgpPeer: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/routers/{{router}}/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating RouterBgpPeer", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to create RouterBgpPeer: %s", err) + } + + log.Printf("[DEBUG] Finished creating RouterBgpPeer %q: %#v", d.Id(), res) + + return resourceComputeRouterBgpPeerRead(d, meta) +} + +func resourceComputeRouterBgpPeerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, 
config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRouterBgpPeer %q", d.Id())) + } + + res, err = flattenNestedComputeRouterBgpPeer(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. 
+ log.Printf("[DEBUG] Removing ComputeRouterBgpPeer because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + + if err := d.Set("name", flattenNestedComputeRouterBgpPeerName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("interface", flattenNestedComputeRouterBgpPeerInterface(res["interfaceName"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("ip_address", flattenNestedComputeRouterBgpPeerIpAddress(res["ipAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("peer_ip_address", flattenNestedComputeRouterBgpPeerPeerIpAddress(res["peerIpAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("peer_asn", flattenNestedComputeRouterBgpPeerPeerAsn(res["peerAsn"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("advertised_route_priority", flattenNestedComputeRouterBgpPeerAdvertisedRoutePriority(res["advertisedRoutePriority"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("advertise_mode", flattenNestedComputeRouterBgpPeerAdvertiseMode(res["advertiseMode"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("advertised_groups", flattenNestedComputeRouterBgpPeerAdvertisedGroups(res["advertisedGroups"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("advertised_ip_ranges", flattenNestedComputeRouterBgpPeerAdvertisedIpRanges(res["advertisedIpRanges"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := 
d.Set("management_type", flattenNestedComputeRouterBgpPeerManagementType(res["managementType"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("bfd", flattenNestedComputeRouterBgpPeerBfd(res["bfd"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("enable", flattenNestedComputeRouterBgpPeerEnable(res["enable"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("router_appliance_instance", flattenNestedComputeRouterBgpPeerRouterApplianceInstance(res["routerApplianceInstance"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("enable_ipv6", flattenNestedComputeRouterBgpPeerEnableIpv6(res["enableIpv6"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("ipv6_nexthop_address", flattenNestedComputeRouterBgpPeerIpv6NexthopAddress(res["ipv6NexthopAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("peer_ipv6_nexthop_address", flattenNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(res["peerIpv6NexthopAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + + return nil +} + +func resourceComputeRouterBgpPeerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + ipAddressProp, err := expandNestedComputeRouterBgpPeerIpAddress(d.Get("ip_address"), d, config) + if err != nil { + return 
err + } else if v, ok := d.GetOkExists("ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ipAddressProp)) { + obj["ipAddress"] = ipAddressProp + } + peerIpAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpAddress(d.Get("peer_ip_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peerIpAddressProp)) { + obj["peerIpAddress"] = peerIpAddressProp + } + peerAsnProp, err := expandNestedComputeRouterBgpPeerPeerAsn(d.Get("peer_asn"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_asn"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peerAsnProp)) { + obj["peerAsn"] = peerAsnProp + } + advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { + obj["advertisedRoutePriority"] = advertisedRoutePriorityProp + } + advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertise_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, advertiseModeProp)) { + obj["advertiseMode"] = advertiseModeProp + } + advertisedGroupsProp, err := expandNestedComputeRouterBgpPeerAdvertisedGroups(d.Get("advertised_groups"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_groups"); ok || !reflect.DeepEqual(v, advertisedGroupsProp) { + obj["advertisedGroups"] = advertisedGroupsProp + } + advertisedIpRangesProp, err := expandNestedComputeRouterBgpPeerAdvertisedIpRanges(d.Get("advertised_ip_ranges"), d, config) + if err != nil { + 
return err + } else if v, ok := d.GetOkExists("advertised_ip_ranges"); ok || !reflect.DeepEqual(v, advertisedIpRangesProp) { + obj["advertisedIpRanges"] = advertisedIpRangesProp + } + bfdProp, err := expandNestedComputeRouterBgpPeerBfd(d.Get("bfd"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bfd"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bfdProp)) { + obj["bfd"] = bfdProp + } + enableProp, err := expandNestedComputeRouterBgpPeerEnable(d.Get("enable"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable"); ok || !reflect.DeepEqual(v, enableProp) { + obj["enable"] = enableProp + } + routerApplianceInstanceProp, err := expandNestedComputeRouterBgpPeerRouterApplianceInstance(d.Get("router_appliance_instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("router_appliance_instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, routerApplianceInstanceProp)) { + obj["routerApplianceInstance"] = routerApplianceInstanceProp + } + enableIpv6Prop, err := expandNestedComputeRouterBgpPeerEnableIpv6(d.Get("enable_ipv6"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_ipv6"); ok || !reflect.DeepEqual(v, enableIpv6Prop) { + obj["enableIpv6"] = enableIpv6Prop + } + ipv6NexthopAddressProp, err := expandNestedComputeRouterBgpPeerIpv6NexthopAddress(d.Get("ipv6_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ipv6_nexthop_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ipv6NexthopAddressProp)) { + obj["ipv6NexthopAddress"] = ipv6NexthopAddressProp + } + peerIpv6NexthopAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(d.Get("peer_ipv6_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ipv6_nexthop_address"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peerIpv6NexthopAddressProp)) { + obj["peerIpv6NexthopAddress"] = peerIpv6NexthopAddressProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{router}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating RouterBgpPeer %q: %#v", d.Id(), obj) + + obj, err = resourceComputeRouterBgpPeerPatchUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating RouterBgpPeer %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating RouterBgpPeer %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating RouterBgpPeer", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceComputeRouterBgpPeerRead(d, meta) +} + +func resourceComputeRouterBgpPeerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) + } + billingProject = project 
+ + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{region}}/{{router}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + obj, err = resourceComputeRouterBgpPeerPatchDeleteEncoder(d, meta, obj) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RouterBgpPeer") + } + log.Printf("[DEBUG] Deleting RouterBgpPeer %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RouterBgpPeer") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting RouterBgpPeer", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting RouterBgpPeer %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeRouterBgpPeerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/regions/(?P[^/]+)/routers/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{project}}/regions/{{region}}/routers/{{router}}/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedComputeRouterBgpPeerName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerInterface(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerPeerIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerPeerAsn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerAdvertisedRoutePriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerAdvertiseMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || 
tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "DEFAULT" + } + + return v +} + +func flattenNestedComputeRouterBgpPeerAdvertisedGroups(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerAdvertisedIpRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "range": flattenNestedComputeRouterBgpPeerAdvertisedIpRangesRange(original["range"], d, config), + "description": flattenNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(original["description"], d, config), + }) + } + return transformed +} +func flattenNestedComputeRouterBgpPeerAdvertisedIpRangesRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerManagementType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerBfd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["session_initialization_mode"] = + flattenNestedComputeRouterBgpPeerBfdSessionInitializationMode(original["sessionInitializationMode"], d, config) + transformed["min_transmit_interval"] = + 
flattenNestedComputeRouterBgpPeerBfdMinTransmitInterval(original["minTransmitInterval"], d, config) + transformed["min_receive_interval"] = + flattenNestedComputeRouterBgpPeerBfdMinReceiveInterval(original["minReceiveInterval"], d, config) + transformed["multiplier"] = + flattenNestedComputeRouterBgpPeerBfdMultiplier(original["multiplier"], d, config) + return []interface{}{transformed} +} +func flattenNestedComputeRouterBgpPeerBfdSessionInitializationMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerBfdMinTransmitInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerBfdMinReceiveInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerBfdMultiplier(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return 
intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerEnable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return true + } + b, err := strconv.ParseBool(v.(string)) + if err != nil { + // If we can't convert it into a bool return value as is and let caller handle it + return v + } + return b +} + +func flattenNestedComputeRouterBgpPeerRouterApplianceInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenNestedComputeRouterBgpPeerEnableIpv6(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerIpv6NexthopAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedComputeRouterBgpPeerName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerInterface(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerIpAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerPeerIpAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerPeerAsn(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerAdvertiseMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerAdvertisedGroups(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerAdvertisedIpRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRange, err := expandNestedComputeRouterBgpPeerAdvertisedIpRangesRange(original["range"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["range"] = transformedRange + } + + transformedDescription, err := expandNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(original["description"], d, config) + if err != nil { + return nil, err + } else { + transformed["description"] = transformedDescription + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNestedComputeRouterBgpPeerAdvertisedIpRangesRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerBfd(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSessionInitializationMode, err := expandNestedComputeRouterBgpPeerBfdSessionInitializationMode(original["session_initialization_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSessionInitializationMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sessionInitializationMode"] = transformedSessionInitializationMode + } + + transformedMinTransmitInterval, err := expandNestedComputeRouterBgpPeerBfdMinTransmitInterval(original["min_transmit_interval"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinTransmitInterval); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minTransmitInterval"] = transformedMinTransmitInterval + } + + transformedMinReceiveInterval, err := expandNestedComputeRouterBgpPeerBfdMinReceiveInterval(original["min_receive_interval"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinReceiveInterval); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minReceiveInterval"] = transformedMinReceiveInterval + } + + transformedMultiplier, err := expandNestedComputeRouterBgpPeerBfdMultiplier(original["multiplier"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMultiplier); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["multiplier"] = transformedMultiplier + } + + return transformed, nil +} + +func expandNestedComputeRouterBgpPeerBfdSessionInitializationMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerBfdMinTransmitInterval(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerBfdMinReceiveInterval(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerBfdMultiplier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerEnable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + + return strings.ToUpper(strconv.FormatBool(v.(bool))), nil +} + +func expandNestedComputeRouterBgpPeerRouterApplianceInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseZonalFieldValue("instances", v.(string), "project", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for router_appliance_instance: %s", err) + } + return f.RelativeLink(), nil +} + +func expandNestedComputeRouterBgpPeerEnableIpv6(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerIpv6NexthopAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedComputeRouterBgpPeer(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["bgpPeers"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case 
map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value bgpPeers. Actual value: %v", v) + } + + _, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceComputeRouterBgpPeerFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName, err := expandNestedComputeRouterBgpPeerName(d.Get("name"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedName := flattenNestedComputeRouterBgpPeerName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemName := flattenNestedComputeRouterBgpPeerName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} + +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. 
+func resourceComputeRouterBgpPeerPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceComputeRouterBgpPeerListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Return error if item already created. + if found != nil { + return nil, fmt.Errorf("Unable to create RouterBgpPeer, existing object already found: %+v", found) + } + + // Return list with the resource to create appended + res := map[string]interface{}{ + "bgpPeers": append(currItems, obj), + } + + return res, nil +} + +// PatchUpdateEncoder handles creating request data to PATCH parent resource +// with list including updated object. +func resourceComputeRouterBgpPeerPatchUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + items, err := resourceComputeRouterBgpPeerListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, items) + if err != nil { + return nil, err + } + + // Return error if item to update does not exist. + if item == nil { + return nil, fmt.Errorf("Unable to update RouterBgpPeer %q - not found in list", d.Id()) + } + + // Merge new object into old. + for k, v := range obj { + item[k] = v + } + items[idx] = item + + // Return list with new item added + res := map[string]interface{}{ + "bgpPeers": items, + } + + return res, nil +} + +// PatchDeleteEncoder handles creating request data to PATCH parent resource +// with list excluding object to delete. 
+func resourceComputeRouterBgpPeerPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resourceComputeRouterBgpPeerListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + if item == nil { + // Spoof 404 error for proper handling by Delete (i.e. no-op) + return nil, tpgresource.Fake404("nested", "ComputeRouterBgpPeer") + } + + updatedItems := append(currItems[:idx], currItems[idx+1:]...) + res := map[string]interface{}{ + "bgpPeers": updatedItems, + } + + return res, nil +} + +// ListForPatch handles making API request to get parent resource and +// extracting list of objects. +func resourceComputeRouterBgpPeerListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return nil, err + } + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, err + } + + var v interface{} + var ok bool + + v, ok = res["bgpPeers"] + if ok && v != nil { + ls, lsOk := v.([]interface{}) + if !lsOk { + return nil, fmt.Errorf(`expected list for nested field "bgpPeers"`) + } + return ls, nil + } + return nil, nil +} diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_router_peer_sweeper.go b/mmv1/third_party/terraform/services/compute/resource_compute_router_peer_sweeper.go new 
file mode 100644 index 000000000000..c174a55376d3 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/resource_compute_router_peer_sweeper.go @@ -0,0 +1,122 @@ +package compute + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("ComputeRouterBgpPeer", testSweepComputeRouterBgpPeer) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeRouterBgpPeer(region string) error { + resourceName := "ComputeRouterBgpPeer" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/routers/{{router}}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: 
config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["bgpPeers"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://compute.googleapis.com/compute/v1/projects/{{project}}/regions/{{region}}/routers/{{router}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_router_peer_test.go.erb 
b/mmv1/third_party/terraform/services/compute/resource_compute_router_peer_test.go.erb new file mode 100644 index 000000000000..e9545df09dcc --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/resource_compute_router_peer_test.go.erb @@ -0,0 +1,188 @@ +<% autogen_exception -%> +package compute_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccComputeRouterBgpPeer_routerPeerRouterAppliance(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterBgpPeerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterBgpPeer_routerPeerRouterAppliance(context), + }, + { + ResourceName: "google_compute_router_peer.peer", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"router_appliance_instance", "router", "region"}, + }, + }, + }) +} + +func testAccComputeRouterBgpPeer_routerPeerRouterAppliance(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "network" { + name = "tf-test-my-router%{random_suffix}-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "tf-test-my-router%{random_suffix}-sub" + network = google_compute_network.network.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "addr_intf" { + name = 
"tf-test-my-router%{random_suffix}-addr-intf" + region = google_compute_subnetwork.subnetwork.region + subnetwork = google_compute_subnetwork.subnetwork.id + address_type = "INTERNAL" +} + +resource "google_compute_address" "addr_intf_redundant" { + name = "tf-test-my-router%{random_suffix}-addr-intf-red" + region = google_compute_subnetwork.subnetwork.region + subnetwork = google_compute_subnetwork.subnetwork.id + address_type = "INTERNAL" +} + +resource "google_compute_address" "addr_peer" { + name = "tf-test-my-router%{random_suffix}-addr-peer" + region = google_compute_subnetwork.subnetwork.region + subnetwork = google_compute_subnetwork.subnetwork.id + address_type = "INTERNAL" +} + +resource "google_compute_instance" "instance" { + name = "router-appliance" + zone = "us-central1-a" + machine_type = "e2-medium" + can_ip_forward = true + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } + + network_interface { + network_ip = google_compute_address.addr_peer.address + subnetwork = google_compute_subnetwork.subnetwork.self_link + } +} + +resource "google_network_connectivity_hub" "hub" { + name = "tf-test-my-router%{random_suffix}-hub" +} + +resource "google_network_connectivity_spoke" "spoke" { + name = "tf-test-my-router%{random_suffix}-spoke" + location = google_compute_subnetwork.subnetwork.region + hub = google_network_connectivity_hub.hub.id + + linked_router_appliance_instances { + instances { + virtual_machine = google_compute_instance.instance.self_link + ip_address = google_compute_address.addr_peer.address + } + site_to_site_data_transfer = false + } +} + +resource "google_compute_router" "router" { + name = "tf-test-my-router%{random_suffix}-router" + region = google_compute_subnetwork.subnetwork.region + network = google_compute_network.network.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_router_interface" "interface_redundant" { + name = "tf-test-my-router%{random_suffix}-intf-red" + region = 
google_compute_router.router.region + router = google_compute_router.router.name + subnetwork = google_compute_subnetwork.subnetwork.self_link + private_ip_address = google_compute_address.addr_intf_redundant.address +} + +resource "google_compute_router_interface" "interface" { + name = "tf-test-my-router%{random_suffix}-intf" + region = google_compute_router.router.region + router = google_compute_router.router.name + subnetwork = google_compute_subnetwork.subnetwork.self_link + private_ip_address = google_compute_address.addr_intf.address + redundant_interface = google_compute_router_interface.interface_redundant.name +} + +resource "google_compute_router_peer" "peer" { + name = "tf-test-my-router-peer%{random_suffix}" + router = google_compute_router.router.name + region = google_compute_router.router.region + interface = google_compute_router_interface.interface.name + router_appliance_instance = google_compute_instance.instance.self_link + peer_asn = 65513 + peer_ip_address = google_compute_address.addr_peer.address +} +`, context) +} + +func testAccCheckComputeRouterBgpPeerDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router_peer" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/routers/{{router}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("ComputeRouterBgpPeer still exists at %s", url) + } + } + + return nil + } +} 
diff --git a/mmv1/third_party/terraform/website/docs/r/compute_router_peer.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_router_peer.html.markdown new file mode 100644 index 000000000000..b5d755cb8261 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/compute_router_peer.html.markdown @@ -0,0 +1,410 @@ +--- +subcategory: "Compute Engine" +description: |- + BGP information that must be configured into the routing stack to + establish BGP peering. +--- + +# google\_compute\_router\_peer + +BGP information that must be configured into the routing stack to +establish BGP peering. This information must specify the peer ASN +and either the interface name, IP address, or peer IP address. +Please refer to RFC4273. + + +To get more information about RouterBgpPeer, see: + +* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/routers) +* How-to Guides + * [Google Cloud Router](https://cloud.google.com/router/docs/) + +## Example Usage - Router Peer Basic + + +```hcl +resource "google_compute_router_peer" "peer" { + name = "my-router-peer" + router = "my-router" + region = "us-central1" + peer_asn = 65513 + advertised_route_priority = 100 + interface = "interface-1" +} +``` +## Example Usage - Router Peer Disabled + + +```hcl +resource "google_compute_router_peer" "peer" { + name = "my-router-peer" + router = "my-router" + region = "us-central1" + peer_ip_address = "169.254.1.2" + peer_asn = 65513 + advertised_route_priority = 100 + interface = "interface-1" + enable = false +} +``` +## Example Usage - Router Peer Bfd + + +```hcl +resource "google_compute_router_peer" "peer" { + name = "my-router-peer" + router = "my-router" + region = "us-central1" + peer_ip_address = "169.254.1.2" + peer_asn = 65513 + advertised_route_priority = 100 + interface = "interface-1" + + bfd { + min_receive_interval = 1000 + min_transmit_interval = 1000 + multiplier = 5 + session_initialization_mode = "ACTIVE" + } +} +``` + +## Example Usage - 
Router Peer Router Appliance + + +```hcl +resource "google_compute_network" "network" { + name = "my-router-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "my-router-sub" + network = google_compute_network.network.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "addr_intf" { + name = "my-router-addr-intf" + region = google_compute_subnetwork.subnetwork.region + subnetwork = google_compute_subnetwork.subnetwork.id + address_type = "INTERNAL" +} + +resource "google_compute_address" "addr_intf_redundant" { + name = "my-router-addr-intf-red" + region = google_compute_subnetwork.subnetwork.region + subnetwork = google_compute_subnetwork.subnetwork.id + address_type = "INTERNAL" +} + +resource "google_compute_address" "addr_peer" { + name = "my-router-addr-peer" + region = google_compute_subnetwork.subnetwork.region + subnetwork = google_compute_subnetwork.subnetwork.id + address_type = "INTERNAL" +} + +resource "google_compute_instance" "instance" { + name = "router-appliance" + zone = "us-central1-a" + machine_type = "e2-medium" + can_ip_forward = true + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } + + network_interface { + network_ip = google_compute_address.addr_peer.address + subnetwork = google_compute_subnetwork.subnetwork.self_link + } +} + +resource "google_network_connectivity_hub" "hub" { + name = "my-router-hub" +} + +resource "google_network_connectivity_spoke" "spoke" { + name = "my-router-spoke" + location = google_compute_subnetwork.subnetwork.region + hub = google_network_connectivity_hub.hub.id + + linked_router_appliance_instances { + instances { + virtual_machine = google_compute_instance.instance.self_link + ip_address = google_compute_address.addr_peer.address + } + site_to_site_data_transfer = false + } +} + +resource "google_compute_router" "router" { + name = "my-router-router" + region = 
google_compute_subnetwork.subnetwork.region + network = google_compute_network.network.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_router_interface" "interface_redundant" { + name = "my-router-intf-red" + region = google_compute_router.router.region + router = google_compute_router.router.name + subnetwork = google_compute_subnetwork.subnetwork.self_link + private_ip_address = google_compute_address.addr_intf_redundant.address +} + +resource "google_compute_router_interface" "interface" { + name = "my-router-intf" + region = google_compute_router.router.region + router = google_compute_router.router.name + subnetwork = google_compute_subnetwork.subnetwork.self_link + private_ip_address = google_compute_address.addr_intf.address + redundant_interface = google_compute_router_interface.interface_redundant.name +} + +resource "google_compute_router_peer" "peer" { + name = "my-router-peer" + router = google_compute_router.router.name + region = google_compute_router.router.region + interface = google_compute_router_interface.interface.name + router_appliance_instance = google_compute_instance.instance.self_link + peer_asn = 65513 + peer_ip_address = google_compute_address.addr_peer.address +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `name` - + (Required) + Name of this BGP peer. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 characters + long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which + means the first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + +* `interface` - + (Required) + Name of the interface the BGP peer is associated with. + +* `peer_asn` - + (Required) + Peer BGP Autonomous System Number (ASN). + Each BGP interface may use a different value. 
+ +* `router` - + (Required) + The name of the Cloud Router in which this BgpPeer will be configured. + + +- - - + + +* `ip_address` - + (Optional) + IP address of the interface inside Google Cloud Platform. + Only IPv4 is supported. + +* `peer_ip_address` - + (Optional) + IP address of the BGP interface outside Google Cloud Platform. + Only IPv4 is supported. Required if `ip_address` is set. + +* `advertised_route_priority` - + (Optional) + The priority of routes advertised to this BGP peer. + Where there is more than one matching route of maximum + length, the routes with the lowest priority value win. + +* `advertise_mode` - + (Optional) + User-specified flag to indicate which mode to use for advertisement. + Valid values of this enum field are: `DEFAULT`, `CUSTOM` + Default value is `DEFAULT`. + Possible values are: `DEFAULT`, `CUSTOM`. + +* `advertised_groups` - + (Optional) + User-specified list of prefix groups to advertise in custom + mode, which currently supports the following option: + * `ALL_SUBNETS`: Advertises all of the router's own VPC subnets. + This excludes any routes learned for subnets that use VPC Network + Peering. + + Note that this field can only be populated if advertiseMode is `CUSTOM` + and overrides the list defined for the router (in the "bgp" message). + These groups are advertised in addition to any specified prefixes. + Leave this field blank to advertise no custom groups. + +* `advertised_ip_ranges` - + (Optional) + User-specified list of individual IP ranges to advertise in + custom mode. This field can only be populated if advertiseMode + is `CUSTOM` and is advertised to all peers of the router. These IP + ranges will be advertised in addition to any specified groups. + Leave this field blank to advertise no custom IP ranges. + Structure is [documented below](#nested_advertised_ip_ranges). + +* `bfd` - + (Optional) + BFD configuration for the BGP peering. + Structure is [documented below](#nested_bfd). 
+ +* `enable` - + (Optional) + The status of the BGP peer connection. If set to false, any active session + with the peer is terminated and all associated routing information is removed. + If set to true, the peer connection can be established with routing information. + The default is true. + +* `router_appliance_instance` - + (Optional) + The URI of the VM instance that is used as third-party router appliances + such as Next Gen Firewalls, Virtual Routers, or Router Appliances. + The VM instance must be located in zones contained in the same region as + this Cloud Router. The VM instance is the peer side of the BGP session. + +* `enable_ipv6` - + (Optional) + Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default. + +* `ipv6_nexthop_address` - + (Optional) + IPv6 address of the interface inside Google Cloud Platform. + The address must be in the range 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64. + If you do not specify the next hop addresses, Google Cloud automatically + assigns unused addresses from the 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64 range for you. + +* `peer_ipv6_nexthop_address` - + (Optional) + IPv6 address of the BGP interface outside Google Cloud Platform. + The address must be in the range 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64. + If you do not specify the next hop addresses, Google Cloud automatically + assigns unused addresses from the 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64 range for you. + +* `region` - + (Optional) + Region where the router and BgpPeer reside. + If it is not provided, the provider region is used. + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + + +The `advertised_ip_ranges` block supports: + +* `range` - + (Required) + The IP range to advertise. The value must be a + CIDR-formatted string. + +* `description` - + (Optional) + User-specified description for the IP range. 
+ +The `bfd` block supports: + +* `session_initialization_mode` - + (Required) + The BFD session initialization mode for this BGP peer. + If set to `ACTIVE`, the Cloud Router will initiate the BFD session + for this BGP peer. If set to `PASSIVE`, the Cloud Router will wait + for the peer router to initiate the BFD session for this BGP peer. + If set to `DISABLED`, BFD is disabled for this BGP peer. + Possible values are: `ACTIVE`, `DISABLED`, `PASSIVE`. + +* `min_transmit_interval` - + (Optional) + The minimum interval, in milliseconds, between BFD control packets + transmitted to the peer router. The actual value is negotiated + between the two routers and is equal to the greater of this value + and the corresponding receive interval of the other router. If set, + this value must be between 1000 and 30000. + +* `min_receive_interval` - + (Optional) + The minimum interval, in milliseconds, between BFD control packets + received from the peer router. The actual value is negotiated + between the two routers and is equal to the greater of this value + and the transmit interval of the other router. If set, this value + must be between 1000 and 30000. + +* `multiplier` - + (Optional) + The number of consecutive BFD packets that must be missed before + BFD declares that a peer is unavailable. If set, the value must + be a value between 5 and 16. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/regions/{{region}}/routers/{{router}}/{{name}}` + +* `management_type` - + The resource that configures and manages this BGP peer. + * `MANAGED_BY_USER` is the default value and can be managed by + you or other users + * `MANAGED_BY_ATTACHMENT` is a BGP peer that is configured and + managed by Cloud Interconnect, specifically by an + InterconnectAttachment of type PARTNER. 
Google automatically + creates, updates, and deletes this type of BGP peer when the + PARTNER InterconnectAttachment is created, updated, + or deleted. + + +## Timeouts + +This resource provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + + +RouterBgpPeer can be imported using any of these accepted formats: + +* `projects/{{project}}/regions/{{region}}/routers/{{router}}/{{name}}` +* `{{project}}/{{region}}/{{router}}/{{name}}` +* `{{region}}/{{router}}/{{name}}` +* `{{router}}/{{name}}` + + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import RouterBgpPeer using one of the formats above. For example: + +```tf +import { + id = "projects/{{project}}/regions/{{region}}/routers/{{router}}/{{name}}" + to = google_compute_router_peer.default +} +``` + +When using the [`terraform import` command](https://developer.hashicorp.com/terraform/cli/commands/import), RouterBgpPeer can be imported using one of the formats above. For example: + +``` +$ terraform import google_compute_router_peer.default projects/{{project}}/regions/{{region}}/routers/{{router}}/{{name}} +$ terraform import google_compute_router_peer.default {{project}}/{{region}}/{{router}}/{{name}} +$ terraform import google_compute_router_peer.default {{region}}/{{router}}/{{name}} +$ terraform import google_compute_router_peer.default {{router}}/{{name}} +``` + +## User Project Overrides + +This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). 
From cc5606ed9072cb2cea144c1769d601d71bdc8ed1 Mon Sep 17 00:00:00 2001 From: kautikdk <144651627+kautikdk@users.noreply.github.com> Date: Tue, 5 Dec 2023 21:27:55 +0000 Subject: [PATCH 14/44] Fix Adding Unexpected Conditions (#9547) * Fix Adding Unexpected Conditions * Adds no_age field * Modify no_age description and variables name. Adds relevant comments. * Fixes Build Failure * Fixes indent issues and removes unnecessary condition. * Modified condition to include case when no_age is not present in the file. --- .../storage/resource_storage_bucket.go.erb | 42 ++++-- .../resource_storage_bucket_test.go.erb | 126 ++++++++++++++++++ .../docs/r/storage_bucket.html.markdown | 2 + 3 files changed, 159 insertions(+), 11 deletions(-) diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb index 7703e96c17f2..ca2ca5749529 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb @@ -217,6 +217,11 @@ func ResourceStorageBucket() *schema.Resource { Optional: true, Description: `Creation date of an object in RFC 3339 (e.g. 
2017-06-13) to satisfy this condition.`, }, + "no_age": { + Type: schema.TypeBool, + Optional: true, + Description: `While set true, age value will be omitted.Required to set true when age is unset in the config file.`, + }, "with_state": { Type: schema.TypeString, Computed: true, @@ -1206,7 +1211,7 @@ func flattenBucketAutoclass(bucketAutoclass *storage.BucketAutoclass) []map[stri return autoclassList } -func flattenBucketLifecycle(lifecycle *storage.BucketLifecycle) []map[string]interface{} { +func flattenBucketLifecycle(d *schema.ResourceData, lifecycle *storage.BucketLifecycle) []map[string]interface{} { if lifecycle == nil || lifecycle.Rule == nil { return []map[string]interface{}{} } @@ -1216,7 +1221,7 @@ func flattenBucketLifecycle(lifecycle *storage.BucketLifecycle) []map[string]int for _, rule := range lifecycle.Rule { rules = append(rules, map[string]interface{}{ "action": schema.NewSet(resourceGCSBucketLifecycleRuleActionHash, []interface{}{flattenBucketLifecycleRuleAction(rule.Action)}), - "condition": schema.NewSet(resourceGCSBucketLifecycleRuleConditionHash, []interface{}{flattenBucketLifecycleRuleCondition(rule.Condition)}), + "condition": schema.NewSet(resourceGCSBucketLifecycleRuleConditionHash, []interface{}{flattenBucketLifecycleRuleCondition(d, rule.Condition)}), }) } @@ -1230,7 +1235,7 @@ func flattenBucketLifecycleRuleAction(action *storage.BucketLifecycleRuleAction) } } -func flattenBucketLifecycleRuleCondition(condition *storage.BucketLifecycleRuleCondition) map[string]interface{} { +func flattenBucketLifecycleRuleCondition(d *schema.ResourceData, condition *storage.BucketLifecycleRuleCondition) map[string]interface{} { ruleCondition := map[string]interface{}{ "created_before": condition.CreatedBefore, "matches_storage_class": tpgresource.ConvertStringArrToInterface(condition.MatchesStorageClass), @@ -1254,6 +1259,12 @@ func flattenBucketLifecycleRuleCondition(condition *storage.BucketLifecycleRuleC ruleCondition["with_state"] = "ARCHIVED" } 
} + // setting no_age value from state config since it is terraform only variable and not getting value from backend. + if v, ok := d.GetOk("lifecycle_rule.0.condition"); ok{ + state_condition := v.(*schema.Set).List()[0].(map[string]interface{}) + ruleCondition["no_age"] = state_condition["no_age"].(bool) + } + return ruleCondition } @@ -1401,11 +1412,14 @@ func expandStorageBucketLifecycleRuleCondition(v interface{}) (*storage.BucketLi condition := conditions[0].(map[string]interface{}) transformed := &storage.BucketLifecycleRuleCondition{} - - if v, ok := condition["age"]; ok { - age := int64(v.(int)) - transformed.Age = &age - transformed.ForceSendFields = append(transformed.ForceSendFields, "Age") + // Setting high precedence of no_age over age when both used together. + // Only sets age value when no_age is not present or no_age is present and has false value + if v, ok := condition["no_age"]; !ok || !(v.(bool)) { + if v, ok := condition["age"]; ok { + age := int64(v.(int)) + transformed.Age = &age + transformed.ForceSendFields = append(transformed.ForceSendFields, "Age") + } } if v, ok := condition["created_before"]; ok { @@ -1506,8 +1520,12 @@ func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) - if v, ok := m["age"]; ok { - buf.WriteString(fmt.Sprintf("%d-", v.(int))) + if v, ok := m["no_age"]; ok && v.(bool){ + buf.WriteString(fmt.Sprintf("%t-", v.(bool))) + } else { + if v, ok := m["age"]; ok { + buf.WriteString(fmt.Sprintf("%d-", v.(int))) + } } if v, ok := m["days_since_custom_time"]; ok { @@ -1650,7 +1668,9 @@ func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res if err := d.Set("autoclass", flattenBucketAutoclass(res.Autoclass)); err != nil { return fmt.Errorf("Error setting autoclass: %s", err) } - if err := d.Set("lifecycle_rule", flattenBucketLifecycle(res.Lifecycle)); err != nil { + // lifecycle_rule contains terraform only variable no_age. 
+ // Passing config("d") to flattener function to set no_age separately. + if err := d.Set("lifecycle_rule", flattenBucketLifecycle(d,res.Lifecycle)); err != nil { return fmt.Errorf("Error setting lifecycle_rule: %s", err) } if err := tpgresource.SetLabels(res.Labels, d, "labels"); err != nil { diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb index 9cbd43fe8aa7..09c92686525d 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb @@ -381,6 +381,74 @@ func TestAccStorageBucket_lifecycleRuleStateAny(t *testing.T) { }) } +func TestAccStorageBucket_lifecycleRulesNoAge(t *testing.T) { + t.Parallel() + var bucket storage.Bucket + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_customAttributes_withLifecycle1(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_customAttributes_withLifecycleNoAge(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + testAccCheckStorageBucketLifecycleConditionNoAge(nil, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"force_destroy","lifecycle_rule.0.condition.0.no_age"}, + }, + { + Config: testAccStorageBucket_customAttributes_withLifecycleNoAgeAndAge(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + testAccCheckStorageBucketLifecycleConditionNoAge(nil, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy","lifecycle_rule.0.condition.0.no_age"}, + }, + { + Config: testAccStorageBucket_customAttributes_withLifecycle1(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + func TestAccStorageBucket_storageClass(t *testing.T) { t.Parallel() @@ -1315,6 +1383,25 @@ func testAccCheckStorageBucketLifecycleConditionState(expected *bool, b *storage } } +func testAccCheckStorageBucketLifecycleConditionNoAge(expected *int64, b *storage.Bucket) resource.TestCheckFunc { + return func(s *terraform.State) error { + actual := b.Lifecycle.Rule[0].Condition.Age + if expected == nil && b.Lifecycle.Rule[0].Condition.Age== nil { + return nil + } + if expected == nil { + return fmt.Errorf("expected condition Age to be unset, instead got %d", *actual) + } + if actual == nil { + return fmt.Errorf("expected condition Age to be %d, instead got nil (unset)", *expected) + } + if *expected != *actual { + return fmt.Errorf("expected condition Age to be %d, instead got %d", *expected, *actual) + } + return nil + } +} + func testAccStorageBucketDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { config := acctest.GoogleProviderConfig(t) @@ -1478,6 +1565,45 @@ 
resource "google_storage_bucket" "bucket" { `, bucketName) } +func testAccStorageBucket_customAttributes_withLifecycleNoAge(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "EU" + force_destroy = "true" + lifecycle_rule { + action { + type = "Delete" + } + condition { + num_newer_versions = 2 + no_age = true + } + } +} +`, bucketName) +} + +func testAccStorageBucket_customAttributes_withLifecycleNoAgeAndAge(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "EU" + force_destroy = "true" + lifecycle_rule { + action { + type = "Delete" + } + condition { + num_newer_versions = 2 + age = 10 + no_age = true + } + } +} +`, bucketName) +} + func testAccStorageBucket_storageClass(bucketName, storageClass, location string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { diff --git a/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown b/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown index 0c2123fa25bf..0131446fffa8 100644 --- a/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown @@ -147,6 +147,8 @@ The following arguments are supported: * `age` - (Optional) Minimum age of an object in days to satisfy this condition. +* `no_age` - (Optional) While set `true`, `age` value will be omitted. **Note** Required to set `true` when `age` is unset in the config file. + * `created_before` - (Optional) A date in the RFC 3339 format YYYY-MM-DD. This condition is satisfied when an object is created before midnight of the specified date in UTC. * `with_state` - (Optional) Match to live and/or archived objects. Unversioned buckets have only live objects. Supported values include: `"LIVE"`, `"ARCHIVED"`, `"ANY"`. 
From 12b126f452170f62a25048f808d9cc6d1e11b526 Mon Sep 17 00:00:00 2001 From: Karol Date: Wed, 6 Dec 2023 18:37:49 +0100 Subject: [PATCH 15/44] Add GKE support for tier 1 networking in GKE (#6826) --- .../resource_container_node_pool.go.erb | 40 ++++++++++++++-- .../resource_container_node_pool_test.go.erb | 46 ++++++++++++++++--- .../docs/r/container_cluster.html.markdown | 6 +++ 3 files changed, 83 insertions(+), 9 deletions(-) diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb index 4350958d6a64..3ae142f56f20 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb @@ -464,6 +464,21 @@ var schemaNodePool = map[string]*schema.Schema{ }, }, }, + "network_performance_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Network bandwidth tier configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "total_egress_bandwidth_tier": { + Type: schema.TypeString, + Required: true, + Description: `Specifies the total network bandwidth tier for the NodePool.`, + }, + }, + }, + }, }, }, }, @@ -1216,6 +1231,7 @@ func flattenNodeNetworkConfig(c *container.NodeNetworkConfig, d *schema.Resource "pod_range": c.PodRange, "enable_private_nodes": c.EnablePrivateNodes, "pod_cidr_overprovision_config": flattenPodCidrOverprovisionConfig(c.PodCidrOverprovisionConfig), + "network_performance_config": flattenNodeNetworkPerformanceConfig(c.NetworkPerformanceConfig), <% unless version == 'ga' -%> "additional_node_network_configs": flattenAdditionalNodeNetworkConfig(c.AdditionalNodeNetworkConfigs), "additional_pod_network_configs": flattenAdditionalPodNetworkConfig(c.AdditionalPodNetworkConfigs), @@ -1225,6 +1241,16 @@ func flattenNodeNetworkConfig(c *container.NodeNetworkConfig, d 
*schema.Resource return result } +func flattenNodeNetworkPerformanceConfig(c *container.NetworkPerformanceConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "total_egress_bandwidth_tier": c.TotalEgressBandwidthTier, + }) + } + return result +} + <% unless version == 'ga' -%> func flattenAdditionalNodeNetworkConfig(c []*container.AdditionalNodeNetworkConfig) []map[string]interface{} { if c == nil { @@ -1321,6 +1347,14 @@ func expandNodeNetworkConfig(v interface{}) *container.NodeNetworkConfig { nnc.PodCidrOverprovisionConfig = expandPodCidrOverprovisionConfig(networkNodeConfig["pod_cidr_overprovision_config"]) + if v, ok := networkNodeConfig["network_performance_config"]; ok && len(v.([]interface{})) > 0 { + nnc.NetworkPerformanceConfig = &container.NetworkPerformanceConfig{} + network_performance_config := v.([]interface{})[0].(map[string]interface{}) + if total_egress_bandwidth_tier, ok := network_performance_config["total_egress_bandwidth_tier"]; ok { + nnc.NetworkPerformanceConfig.TotalEgressBandwidthTier = total_egress_bandwidth_tier.(string) + } + } + return nnc } @@ -1978,7 +2012,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } if d.HasChange(prefix + "network_config") { - if d.HasChange(prefix + "network_config.0.enable_private_nodes") { + if d.HasChange(prefix + "network_config.0.enable_private_nodes") || d.HasChange(prefix + "network_config.0.network_performance_config") { req := &container.UpdateNodePoolRequest{ NodePoolId: name, NodeNetworkConfig: expandNodeNetworkConfig(d.Get(prefix + "network_config")), @@ -1998,7 +2032,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, - "updating GKE node pool workload_metadata_config", userAgent, + "updating GKE node pool network_config", userAgent, timeout) } @@ 
-2006,7 +2040,7 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node return err } - log.Printf("[INFO] Updated workload_metadata_config for node pool %s", name) + log.Printf("[INFO] Updated network_config for node pool %s", name) } } diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb index 7f2d7d7aeb31..f821b5a5a413 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb @@ -579,10 +579,12 @@ func TestAccContainerNodePool_withNetworkConfig(t *testing.T) { CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccContainerNodePool_withNetworkConfig(cluster, np, network), + Config: testAccContainerNodePool_withNetworkConfig(cluster, np, network, "TIER_1"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr( "google_container_node_pool.with_pco_disabled", "network_config.0.pod_cidr_overprovision_config.0.disabled", "true"), + resource.TestCheckResourceAttr("google_container_node_pool.with_tier1_net", "network_config.0.network_performance_config.#", "1"), + resource.TestCheckResourceAttr("google_container_node_pool.with_tier1_net", "network_config.0.network_performance_config.0.total_egress_bandwidth_tier", "TIER_1"), ), }, { @@ -597,6 +599,14 @@ func TestAccContainerNodePool_withNetworkConfig(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"network_config.0.create_pod_range"}, }, + // edit the updateable network config + { + Config: testAccContainerNodePool_withNetworkConfig(cluster, np, network, "TIER_UNSPECIFIED"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.with_tier1_net", 
"network_config.0.network_performance_config.#", "1"), + resource.TestCheckResourceAttr("google_container_node_pool.with_tier1_net", "network_config.0.network_performance_config.0.total_egress_bandwidth_tier", "TIER_UNSPECIFIED"), + ), + }, }, }) } @@ -2998,7 +3008,7 @@ resource "google_container_node_pool" "np" { `, cluster, networkName, subnetworkName, np, mode) } -func testAccContainerNodePool_withNetworkConfig(cluster, np, network string) string { +func testAccContainerNodePool_withNetworkConfig(cluster, np, network, netTier string) string { return fmt.Sprintf(` resource "google_compute_network" "container_network" { name = "%s" @@ -3075,7 +3085,7 @@ resource "google_container_node_pool" "with_auto_pod_cidr" { node_count = 1 network_config { create_pod_range = true - pod_range = "auto-pod-range" + pod_range = "auto-pod-range" pod_ipv4_cidr_block = "10.2.0.0/20" } node_config { @@ -3091,7 +3101,7 @@ resource "google_container_node_pool" "with_pco_disabled" { cluster = google_container_cluster.cluster.name node_count = 1 network_config { - pod_cidr_overprovision_config { + pod_cidr_overprovision_config { disabled = true } } @@ -3102,7 +3112,31 @@ resource "google_container_node_pool" "with_pco_disabled" { } } -`, network, cluster, np, np, np) +resource "google_container_node_pool" "with_tier1_net" { + name = "%s-tier1" + location = "us-central1" + cluster = google_container_cluster.cluster.name + node_count = 1 + node_locations = [ + "us-central1-a", + ] + network_config { + network_performance_config { + total_egress_bandwidth_tier = "%s" + } + } + node_config { + machine_type = "n2-standard-32" + gvnic { + enabled = true + } + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } +} + +`, network, cluster, np, np, np, np, netTier) } <% unless version.nil? 
|| version == 'ga' -%> @@ -4105,4 +4139,4 @@ resource "google_container_node_pool" "without_confidential_boot_disk" { } `, cluster, networkName, subnetworkName, np) } -<% end -%> \ No newline at end of file +<% end -%> diff --git a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown index 7ec0fb4e10f5..157bfb70f76e 100644 --- a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown @@ -957,6 +957,12 @@ sole_tenant_config { * `threads_per_core` - (Required) The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed. +* `network_performance_config` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Network bandwidth tier configuration. + +The `network_performance_config` block supports: + +* `total_egress_bandwidth_tier` (Required) - Specifies the total network bandwidth tier for the NodePool. + The `ephemeral_storage_config` block supports: * `local_ssd_count` (Required) - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to disable using local SSDs as ephemeral storage. 
From cf700745b980492fa902f7d5a60c70bb11886ee3 Mon Sep 17 00:00:00 2001 From: rahul2393 Date: Wed, 6 Dec 2023 23:19:25 +0530 Subject: [PATCH 16/44] doc(spanner): add example for using spanner with autoscaler config (#9591) * doc(spanner): add example for using spanner with autoscaler config * fix references * fix tests --- mmv1/products/spanner/Instance.yaml | 6 ++++++ .../spanner_instance_with_autoscaling.tf.erb | 17 +++++++++++++++++ 2 files changed, 23 insertions(+) create mode 100644 mmv1/templates/terraform/examples/spanner_instance_with_autoscaling.tf.erb diff --git a/mmv1/products/spanner/Instance.yaml b/mmv1/products/spanner/Instance.yaml index 7d2b8f595fc9..0a353ee85dd8 100644 --- a/mmv1/products/spanner/Instance.yaml +++ b/mmv1/products/spanner/Instance.yaml @@ -63,6 +63,12 @@ examples: 'example' # Randomness skip_vcr: true + - !ruby/object:Provider::Terraform::Examples + name: 'spanner_instance_with_autoscaling' + primary_resource_id: + 'example' + # Randomness + skip_vcr: true - !ruby/object:Provider::Terraform::Examples name: 'spanner_instance_multi_regional' primary_resource_id: diff --git a/mmv1/templates/terraform/examples/spanner_instance_with_autoscaling.tf.erb b/mmv1/templates/terraform/examples/spanner_instance_with_autoscaling.tf.erb new file mode 100644 index 000000000000..3830a71a70d8 --- /dev/null +++ b/mmv1/templates/terraform/examples/spanner_instance_with_autoscaling.tf.erb @@ -0,0 +1,17 @@ +resource "google_spanner_instance" "example" { + config = "regional-us-central1" + display_name = "Test Spanner Instance" + autoscaling_config { + autoscaling_limits { + max_processing_units = 3000 + min_processing_units = 2000 + } + autoscaling_targets { + high_priority_cpu_utilization_percent = 75 + storage_utilization_percent = 90 + } + } + labels = { + "foo" = "bar" + } +} From b16a839ade68f8cbcbfa1071e089cd6b4e3af239 Mon Sep 17 00:00:00 2001 From: Swamita Gupta <55314843+swamitagupta@users.noreply.github.com> Date: Thu, 7 Dec 2023 00:32:46 +0530 
Subject: [PATCH 17/44] Add ExternalAddress resource to Vmwareengine (#9573) --- .../vmwareengine/ExternalAddress.yaml | 125 ++++++++++++++ ...mware_engine_external_address_basic.tf.erb | 41 +++++ .../provider/provider_mmv1_resources.go.erb | 1 + ...ce_google_vmwareengine_external_address.go | 39 +++++ ...urce_vmwareengine_external_address_test.go | 152 ++++++++++++++++++ .../transport/error_retry_predicates.go | 11 ++ .../transport/error_retry_predicates_test.go | 11 ++ ...mwareengine_external_address.html.markdown | 32 ++++ 8 files changed, 412 insertions(+) create mode 100644 mmv1/products/vmwareengine/ExternalAddress.yaml create mode 100644 mmv1/templates/terraform/examples/vmware_engine_external_address_basic.tf.erb create mode 100644 mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_external_address.go create mode 100644 mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_external_address_test.go create mode 100644 mmv1/third_party/terraform/website/docs/d/vmwareengine_external_address.html.markdown diff --git a/mmv1/products/vmwareengine/ExternalAddress.yaml b/mmv1/products/vmwareengine/ExternalAddress.yaml new file mode 100644 index 000000000000..f6ba9cdc6bb9 --- /dev/null +++ b/mmv1/products/vmwareengine/ExternalAddress.yaml @@ -0,0 +1,125 @@ +# Copyright 2023 Google Inc. +# Licensed under the Apache License, Version 2.0 (the License); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Api::Resource +name: 'ExternalAddress' +base_url: '{{parent}}/externalAddresses' +create_url: '{{parent}}/externalAddresses?externalAddressId={{name}}' +self_link: '{{parent}}/externalAddresses/{{name}}' +update_mask: true +update_verb: :PATCH +references: !ruby/object:Api::Resource::ReferenceLinks + api: 'https://cloud.google.com/vmware-engine/docs/reference/rest/v1/projects.locations.privateClouds.externalAddresses' +description: | + An allocated external IP address and its corresponding internal IP address in a private cloud. +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: "name" + base_url: "{{op_id}}" + wait_ms: 1000 + timeouts: !ruby/object:Api::Timeouts + insert_minutes: 40 + result: !ruby/object:Api::OpAsync::Result + path: "response" + status: !ruby/object:Api::OpAsync::Status + path: "done" + complete: true + allowed: + - true + - false + error: !ruby/object:Api::OpAsync::Error + path: "error" + message: "message" + include_project: true + +import_format: ["{{%parent}}/externalAddresses/{{name}}"] +id_format: "{{parent}}/externalAddresses/{{name}}" +error_retry_predicates: ['transport_tpg.ExternalIpServiceNotActive'] +autogen_async: true + +examples: + - !ruby/object:Provider::Terraform::Examples + name: "vmware_engine_external_address_basic" + primary_resource_id: "vmw-engine-external-address" + skip_test: true # update tests will take care of all CRUD tests. Parent PC creation is expensive and node reservation is required. + vars: + name: "sample-external-address" + network_id: "pc-nw" + private_cloud_id: "sample-pc" + management_cluster_id: "sample-mgmt-cluster" + network_policy_id: "sample-np" + +parameters: + - !ruby/object:Api::Type::String + name: "parent" + immutable: true + required: true + url_param_only: true + description: | + The resource name of the private cloud to create a new external address in. 
+ Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. + For example: projects/my-project/locations/us-west1-a/privateClouds/my-cloud + + - !ruby/object:Api::Type::String + name: "name" + required: true + immutable: true + url_param_only: true + description: | + The ID of the external IP Address. + +properties: + - !ruby/object:Api::Type::Time + name: 'createTime' + output: true + description: | + Creation time of this resource. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and + up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + + - !ruby/object:Api::Type::Time + name: 'updateTime' + output: true + description: | + Last updated time of this resource. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine + fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + + - !ruby/object:Api::Type::String + name: 'internalIp' + required: true + description: | + The internal IP address of a workload VM. + + - !ruby/object:Api::Type::String + name: 'externalIp' + output: true + description: | + The external IP address of a workload VM. + + - !ruby/object:Api::Type::String + name: 'state' + description: | + State of the resource. + output: true + + - !ruby/object:Api::Type::String + name: 'uid' + output: true + description: | + System-generated unique identifier for the resource. + + - !ruby/object:Api::Type::String + name: 'description' + description: | + User-provided description for this resource. 
diff --git a/mmv1/templates/terraform/examples/vmware_engine_external_address_basic.tf.erb b/mmv1/templates/terraform/examples/vmware_engine_external_address_basic.tf.erb new file mode 100644 index 000000000000..874fbe25b5cf --- /dev/null +++ b/mmv1/templates/terraform/examples/vmware_engine_external_address_basic.tf.erb @@ -0,0 +1,41 @@ +resource "google_vmwareengine_network" "external-address-nw" { + name = "<%= ctx[:vars]['network_id'] %>" + location = "global" + type = "STANDARD" + description = "PC network description." +} + +resource "google_vmwareengine_private_cloud" "external-address-pc" { + location = "<%= ctx[:test_env_vars]['region'] %>-a" + name = "<%= ctx[:vars]['private_cloud_id'] %>" + description = "Sample test PC." + network_config { + management_cidr = "192.168.50.0/24" + vmware_engine_network = google_vmwareengine_network.external-address-nw.id + } + + management_cluster { + cluster_id = "<%= ctx[:vars]['management_cluster_id'] %>" + node_type_configs { + node_type_id = "standard-72" + node_count = 3 + } + } +} + +resource "google_vmwareengine_network_policy" "external-address-np" { + location = "<%= ctx[:test_env_vars]['region'] %>" + name = "<%= ctx[:vars]['network_policy_id'] %>" + edge_services_cidr = "192.168.30.0/26" + vmware_engine_network = google_vmwareengine_network.external-address-nw.id +} + +resource "google_vmwareengine_external_address" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['name'] %>" + parent = google_vmwareengine_private_cloud.external-address-pc.id + internal_ip = "192.168.0.66" + description = "Sample description." 
+ depends_on = [ + google_vmwareengine_network_policy.external-address-np, + ] +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index 8f41adb2b48e..baa795cf7d38 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -202,6 +202,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ <% unless version == 'ga' -%> "google_vmwareengine_cluster": vmwareengine.DataSourceVmwareengineCluster(), <% end -%> + "google_vmwareengine_external_address": vmwareengine.DataSourceVmwareengineExternalAddress(), "google_vmwareengine_network": vmwareengine.DataSourceVmwareengineNetwork(), "google_vmwareengine_network_peering": vmwareengine.DataSourceVmwareengineNetworkPeering(), "google_vmwareengine_network_policy": vmwareengine.DataSourceVmwareengineNetworkPolicy(), diff --git a/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_external_address.go b/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_external_address.go new file mode 100644 index 000000000000..f7d4b1120124 --- /dev/null +++ b/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_external_address.go @@ -0,0 +1,39 @@ +package vmwareengine + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceVmwareengineExternalAddress() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceVmwareengineExternalAddress().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "parent", "name") + return &schema.Resource{ + Read: dataSourceVmwareengineExternalAddressRead, + Schema: 
dsSchema, + } +} + +func dataSourceVmwareengineExternalAddressRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/externalAddresses/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + err = resourceVmwareengineExternalAddressRead(d, meta) + if err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + return nil +} diff --git a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_external_address_test.go b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_external_address_test.go new file mode 100644 index 000000000000..48ede6c32f05 --- /dev/null +++ b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_external_address_test.go @@ -0,0 +1,152 @@ +package vmwareengine_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccVmwareengineExternalAddress_vmwareEngineExternalAddressUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "region": "southamerica-east1", // using region with low node utilization. 
+ "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckVmwareengineExternalAddressDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testVmwareEngineExternalAddressConfig(context, "description1", "192.168.0.66"), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceStateWithIgnores("data.google_vmwareengine_external_address.ds", "google_vmwareengine_external_address.vmw-engine-external-address", map[string]struct{}{}), + ), + }, + { + ResourceName: "google_vmwareengine_external_address.vmw-engine-external-address", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"parent", "name"}, + }, + { + Config: testVmwareEngineExternalAddressConfig(context, "description2", "192.168.0.67"), + }, + { + ResourceName: "google_vmwareengine_external_address.vmw-engine-external-address", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"parent", "name"}, + }, + }, + }) +} + +func testVmwareEngineExternalAddressConfig(context map[string]interface{}, description string, internalIp string) string { + context["internal_ip"] = internalIp + context["description"] = description + return acctest.Nprintf(` +resource "google_vmwareengine_network" "external-address-nw" { + name = "tf-test-sample-external-address-nw%{random_suffix}" + location = "global" + type = "STANDARD" + description = "PC network description." +} + +resource "google_vmwareengine_private_cloud" "external-address-pc" { + location = "%{region}-a" + name = "tf-test-sample-external-address-pc%{random_suffix}" + description = "Sample test PC." 
+ network_config { + management_cidr = "192.168.1.0/24" + vmware_engine_network = google_vmwareengine_network.external-address-nw.id + } + + management_cluster { + cluster_id = "tf-test-sample-external-address-cluster%{random_suffix}" + node_type_configs { + node_type_id = "standard-72" + node_count = 3 + } + } +} + +resource "google_vmwareengine_network_policy" "external-address-np" { + location = "%{region}" + name = "tf-test-sample-external-address-np%{random_suffix}" + edge_services_cidr = "192.168.0.0/26" + vmware_engine_network = google_vmwareengine_network.external-address-nw.id + internet_access { + enabled = true + } + external_ip { + enabled = true + } +} + +resource "google_vmwareengine_external_address" "vmw-engine-external-address" { + name = "tf-test-sample-external-address%{random_suffix}" + parent = google_vmwareengine_private_cloud.external-address-pc.id + internal_ip = "%{internal_ip}" + description = "%{description}" + depends_on = [ + google_vmwareengine_network_policy.external-address-np, + ] +} + +data "google_vmwareengine_external_address" "ds" { + name = google_vmwareengine_external_address.vmw-engine-external-address.name + parent = google_vmwareengine_private_cloud.external-address-pc.id + depends_on = [ + google_vmwareengine_external_address.vmw-engine-external-address, + ] +} +`, context) +} + +func testAccCheckVmwareengineExternalAddressDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_vmwareengine_external_address" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{VmwareengineBasePath}}{{parent}}/externalAddresses/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("VmwareengineExternalAddress still exists at %s", url) + } + } + + return nil + } +} diff --git a/mmv1/third_party/terraform/transport/error_retry_predicates.go b/mmv1/third_party/terraform/transport/error_retry_predicates.go index d023c0b47b19..3f1c95ca2fe2 100644 --- a/mmv1/third_party/terraform/transport/error_retry_predicates.go +++ b/mmv1/third_party/terraform/transport/error_retry_predicates.go @@ -521,3 +521,14 @@ func IsForbiddenIamServiceAccountRetryableError(opType string) RetryErrorPredica return false, "" } } + +// Retry the creation of `google_vmwareengine_external_address` resource if the network policy's +// External IP field is not active yet. +func ExternalIpServiceNotActive(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 400 && strings.Contains(gerr.Body, "External IP address network service is not active in the provided network policy") { + return true, "Waiting for external ip service to be enabled" + } + } + return false, "" +} diff --git a/mmv1/third_party/terraform/transport/error_retry_predicates_test.go b/mmv1/third_party/terraform/transport/error_retry_predicates_test.go index a097e8219e5b..6df6978cca86 100644 --- a/mmv1/third_party/terraform/transport/error_retry_predicates_test.go +++ b/mmv1/third_party/terraform/transport/error_retry_predicates_test.go @@ -203,3 +203,14 @@ func TestFirestoreIndex409_retryUnderlyingDataChanged(t *testing.T) { t.Errorf("Error not detected as retryable") } } + +func TestExternalIpServiceNotActive(t *testing.T) { + err := googleapi.Error{ + Code: 400, + Body: "External IP address network service is not active in the provided network policy", + } + isRetryable, _ := ExternalIpServiceNotActive(&err) + if !isRetryable { + t.Errorf("Error not detected as 
retryable") + } +} diff --git a/mmv1/third_party/terraform/website/docs/d/vmwareengine_external_address.html.markdown b/mmv1/third_party/terraform/website/docs/d/vmwareengine_external_address.html.markdown new file mode 100644 index 000000000000..862467c92bc7 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/vmwareengine_external_address.html.markdown @@ -0,0 +1,32 @@ +--- +subcategory: "Cloud VMware Engine" +description: |- + Get information about an external address. +--- + +# google\_vmwareengine\_external_address + +Use this data source to get details about an external address resource. + +To get more information about external addresses, see: +* [API documentation](https://cloud.google.com/vmware-engine/docs/reference/rest/v1/projects.locations.privateClouds.externalAddresses) + +## Example Usage + +```hcl +data "google_vmwareengine_external_address" "my_external_address" { + name = "my-external-address" + parent = "project/my-project/locations/us-west1-a/privateClouds/my-cloud" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Name of the resource. +* `parent` - (Required) The resource name of the private cloud that this external address belongs to. + +## Attributes Reference + +See [google_vmwareengine_external_address](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/vmwareengine_external_address#attributes-reference) resource for details of all the available attributes.
\ No newline at end of file From 3547ac8d589d09f0b66f4c1fe3e1dfd9783c87d0 Mon Sep 17 00:00:00 2001 From: Swamita Gupta <55314843+swamitagupta@users.noreply.github.com> Date: Thu, 7 Dec 2023 00:45:18 +0530 Subject: [PATCH 18/44] Add Subnet resource to Vmwareengine (#9560) --- mmv1/products/vmwareengine/Subnet.yaml | 168 ++++++++++++++++++ .../vmware_engine_subnet_user_defined.tf.erb | 30 ++++ .../provider/provider_mmv1_resources.go.erb | 1 + .../data_source_google_vmwareengine_subnet.go | 38 ++++ .../resource_vmwareengine_subnet_test.go | 90 ++++++++++ .../docs/d/vmwareengine_subnet.html.markdown | 34 ++++ 6 files changed, 361 insertions(+) create mode 100644 mmv1/products/vmwareengine/Subnet.yaml create mode 100644 mmv1/templates/terraform/examples/vmware_engine_subnet_user_defined.tf.erb create mode 100644 mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_subnet.go create mode 100644 mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_subnet_test.go create mode 100644 mmv1/third_party/terraform/website/docs/d/vmwareengine_subnet.html.markdown diff --git a/mmv1/products/vmwareengine/Subnet.yaml b/mmv1/products/vmwareengine/Subnet.yaml new file mode 100644 index 000000000000..41faa4dd8751 --- /dev/null +++ b/mmv1/products/vmwareengine/Subnet.yaml @@ -0,0 +1,168 @@ +# Copyright 2023 Google Inc. +# Licensed under the Apache License, Version 2.0 (the License); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Api::Resource +name: 'Subnet' +base_url: '{{parent}}/subnets' +create_url: '{{parent}}/subnets/{{name}}?update_mask=ip_cidr_range' +self_link: '{{parent}}/subnets/{{name}}' +update_mask: true +create_verb: :PATCH +update_verb: :PATCH +skip_delete: true +references: !ruby/object:Api::Resource::ReferenceLinks + api: 'https://cloud.google.com/vmware-engine/docs/reference/rest/v1/projects.locations.privateClouds.subnets' +description: | + Subnet in a private cloud. A Private Cloud contains two types of subnets: `management` subnets (such as vMotion) that + are read-only,and `userDefined`, which can also be updated. This resource should be used to read and update `userDefined` + subnets. To read `management` subnets, please utilize the subnet data source. +async: !ruby/object:Api::OpAsync + actions: ['create', 'update'] + operation: !ruby/object:Api::OpAsync::Operation + path: "name" + base_url: "{{op_id}}" + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: "response" + status: !ruby/object:Api::OpAsync::Status + path: "done" + complete: true + allowed: + - true + - false + error: !ruby/object:Api::OpAsync::Error + path: "error" + message: "message" + include_project: true + +import_format: ["{{%parent}}/subnets/{{name}}"] +id_format: "{{parent}}/subnets/{{name}}" +autogen_async: true + +examples: + - !ruby/object:Provider::Terraform::Examples + name: "vmware_engine_subnet_user_defined" + skip_test: true # update tests will take care of read and update. Parent PC creation is expensive and node reservation is required. + primary_resource_id: "vmw-engine-subnet" + vars: + private_cloud_id: "sample-pc" + management_cluster_id: "sample-mgmt-cluster" + network_id: "pc-nw" + subnet_id: "service-1" + test_env_vars: + region: :REGION + +parameters: + - !ruby/object:Api::Type::String + name: "parent" + immutable: true + required: true + url_param_only: true + description: | + The resource name of the private cloud to create a new subnet in. 
+ Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. + For example: projects/my-project/locations/us-west1-a/privateClouds/my-cloud + + - !ruby/object:Api::Type::String + name: "name" + required: true + immutable: true + url_param_only: true + description: | + The ID of the subnet. For userDefined subnets, this name should be in the format of "service-n", + where n ranges from 1 to 5. + +properties: + - !ruby/object:Api::Type::Time + name: 'createTime' + output: true + description: | + Creation time of this resource. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and + up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + + - !ruby/object:Api::Type::Time + name: 'updateTime' + output: true + description: | + Last updated time of this resource. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine + fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + + - !ruby/object:Api::Type::String + name: 'ipCidrRange' + required: true + send_empty_value: true + description: | + The IP address range of the subnet in CIDR format. + + - !ruby/object:Api::Type::String + name: 'gatewayIp' + output: true + description: | + The IP address of the gateway of this subnet. Must fall within the IP prefix defined above. + + - !ruby/object:Api::Type::String + name: 'gatewayId' + output: true + description: | + The canonical identifier of the logical router that this subnet is attached to. + + - !ruby/object:Api::Type::Array + name: 'dhcpAddressRanges' + output: true + description: | + DHCP address ranges. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'firstAddress' + output: true + description: | + The first IP address of the range. 
+ - !ruby/object:Api::Type::String + name: 'lastAddress' + output: true + description: | + The last IP address of the range. + + - !ruby/object:Api::Type::String + name: 'type' + output: true + description: | + The type of the subnet. + + - !ruby/object:Api::Type::Boolean + name: standardConfig + output: true + description: | + Whether the NSX-T configuration in the backend follows the standard configuration supported by Google Cloud. + If false, the subnet cannot be modified through Google Cloud, only through NSX-T directly. + + - !ruby/object:Api::Type::String + name: 'state' + description: | + State of the subnet. + output: true + + - !ruby/object:Api::Type::String + name: 'uid' + output: true + description: | + System-generated unique identifier for the resource. + + - !ruby/object:Api::Type::Integer + name: 'vlanId' + output: true + description: | + VLAN ID of the VLAN on which the subnet is configured. diff --git a/mmv1/templates/terraform/examples/vmware_engine_subnet_user_defined.tf.erb b/mmv1/templates/terraform/examples/vmware_engine_subnet_user_defined.tf.erb new file mode 100644 index 000000000000..aeedd117bac4 --- /dev/null +++ b/mmv1/templates/terraform/examples/vmware_engine_subnet_user_defined.tf.erb @@ -0,0 +1,30 @@ +resource "google_vmwareengine_network" "subnet-nw" { + name = "<%= ctx[:vars]['network_id'] %>" + location = "global" + type = "STANDARD" + description = "PC network description." +} + +resource "google_vmwareengine_private_cloud" "subnet-pc" { + location = "<%= ctx[:test_env_vars]['region'] %>-a" + name = "<%= ctx[:vars]['private_cloud_id'] %>" + description = "Sample test PC." 
+ network_config { + management_cidr = "192.168.50.0/24" + vmware_engine_network = google_vmwareengine_network.subnet-nw.id + } + + management_cluster { + cluster_id = "<%= ctx[:vars]['management_cluster_id'] %>" + node_type_configs { + node_type_id = "standard-72" + node_count = 3 + } + } +} + +resource "google_vmwareengine_subnet" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['subnet_id'] %>" + parent = google_vmwareengine_private_cloud.subnet-pc.id + ip_cidr_range = "192.168.100.0/26" +} diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index baa795cf7d38..80aa24eae9d3 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -208,6 +208,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_vmwareengine_network_policy": vmwareengine.DataSourceVmwareengineNetworkPolicy(), "google_vmwareengine_nsx_credentials": vmwareengine.DataSourceVmwareengineNsxCredentials(), "google_vmwareengine_private_cloud": vmwareengine.DataSourceVmwareenginePrivateCloud(), + "google_vmwareengine_subnet": vmwareengine.DataSourceVmwareengineSubnet(), // ####### END handwritten datasources ########### } diff --git a/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_subnet.go b/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_subnet.go new file mode 100644 index 000000000000..eb6af6c08ab6 --- /dev/null +++ b/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_subnet.go @@ -0,0 +1,38 @@ +package vmwareengine + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func 
DataSourceVmwareengineSubnet() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceVmwareengineSubnet().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "parent", "name") + return &schema.Resource{ + Read: dataSourceVmwareengineSubnetRead, + Schema: dsSchema, + } +} + +func dataSourceVmwareengineSubnetRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}/subnets/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + err = resourceVmwareengineSubnetRead(d, meta) + if err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + return nil +} diff --git a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_subnet_test.go b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_subnet_test.go new file mode 100644 index 000000000000..bf233f1dfde2 --- /dev/null +++ b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_subnet_test.go @@ -0,0 +1,90 @@ +package vmwareengine_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccVmwareengineSubnet_vmwareEngineUserDefinedSubnetUpdate(t *testing.T) { + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "region": "southamerica-west1", // using region with low node utilization. 
+ "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testVmwareEngineSubnetConfig(context, "192.168.1.0/26"), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceStateWithIgnores("data.google_vmwareengine_subnet.ds", "google_vmwareengine_subnet.vmw-engine-subnet", map[string]struct{}{}), + ), + }, + { + ResourceName: "google_vmwareengine_subnet.vmw-engine-subnet", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"parent", "name"}, + }, + { + Config: testVmwareEngineSubnetConfig(context, "192.168.2.0/26"), + }, + { + ResourceName: "google_vmwareengine_subnet.vmw-engine-subnet", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"parent", "name"}, + }, + }, + }) +} + +func testVmwareEngineSubnetConfig(context map[string]interface{}, ipCidrRange string) string { + context["ip_cidr_range"] = ipCidrRange + return acctest.Nprintf(` +resource "google_vmwareengine_network" "subnet-nw" { + name = "tf-test-subnet-nw%{random_suffix}" + location = "global" + type = "STANDARD" + description = "PC network description." +} + +resource "google_vmwareengine_private_cloud" "subnet-pc" { + location = "%{region}-a" + name = "tf-test-subnet-pc%{random_suffix}" + description = "Sample test PC." 
+ network_config { + management_cidr = "192.168.0.0/24" + vmware_engine_network = google_vmwareengine_network.subnet-nw.id + } + + management_cluster { + cluster_id = "tf-test-mgmt-cluster%{random_suffix}" + node_type_configs { + node_type_id = "standard-72" + node_count = 3 + } + } +} + +resource "google_vmwareengine_subnet" "vmw-engine-subnet" { + name = "service-2" + parent = google_vmwareengine_private_cloud.subnet-pc.id + ip_cidr_range = "%{ip_cidr_range}" +} + +data "google_vmwareengine_subnet" ds { + name = "service-2" + parent = google_vmwareengine_private_cloud.subnet-pc.id + depends_on = [ + google_vmwareengine_subnet.vmw-engine-subnet, + ] +} +`, context) +} diff --git a/mmv1/third_party/terraform/website/docs/d/vmwareengine_subnet.html.markdown b/mmv1/third_party/terraform/website/docs/d/vmwareengine_subnet.html.markdown new file mode 100644 index 000000000000..0c6c77db835d --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/vmwareengine_subnet.html.markdown @@ -0,0 +1,34 @@ +--- +subcategory: "Cloud VMware Engine" +description: |- + Get info about a private cloud subnet. +--- + +# google\_vmwareengine\_subnet + +Use this data source to get details about a subnet. Management subnets support only read operations and should be configured through this data source. User defined subnets can be configured using the resource as well as the datasource. + +To get more information about private cloud subnet, see: +* [API documentation](https://cloud.google.com/vmware-engine/docs/reference/rest/v1/projects.locations.privateClouds.subnets) + +## Example Usage + +```hcl +data "google_vmwareengine_subnet" "my_subnet" { + name = "service-1" + parent = "project/my-project/locations/us-west1-a/privateClouds/my-cloud" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Name of the resource. +UserDefined subnets are named in the format of "service-n", where n ranges from 1 to 5. 
+Management subnets have arbitary names including "vmotion", "vsan", "system-management" etc. More details about subnet names can be found on the cloud console. +* `parent` - (Required) The resource name of the private cloud that this subnet belongs. + +## Attributes Reference + +See [google_vmwareengine_subnet](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/vmwareengine_subnet#attributes-reference) resource for details of all the available attributes. \ No newline at end of file From 0ca113a42f07ec89a9d49394f69796243f9d9a8b Mon Sep 17 00:00:00 2001 From: Obada Alabbadi <76101898+obada-ab@users.noreply.github.com> Date: Wed, 6 Dec 2023 21:17:20 +0100 Subject: [PATCH 19/44] Add external dataset reference to bigquery dataset (#9440) * Add external dataset reference to bigquery dataset * Update external dataset test * Refactor external dataset reference --- mmv1/products/bigquery/Dataset.yaml | 34 +++++++++++++++++++ mmv1/products/bigquery/product.yaml | 3 ++ ...uery_dataset_external_reference_aws.tf.erb | 12 +++++++ ...dataset_external_reference_aws_docs.tf.erb | 12 +++++++ 4 files changed, 61 insertions(+) create mode 100644 mmv1/templates/terraform/examples/bigquery_dataset_external_reference_aws.tf.erb create mode 100644 mmv1/templates/terraform/examples/bigquery_dataset_external_reference_aws_docs.tf.erb diff --git a/mmv1/products/bigquery/Dataset.yaml b/mmv1/products/bigquery/Dataset.yaml index 4e8540977f17..c865bc242b68 100644 --- a/mmv1/products/bigquery/Dataset.yaml +++ b/mmv1/products/bigquery/Dataset.yaml @@ -78,6 +78,20 @@ examples: vars: dataset_id: 'example_dataset' account_name: 'bqowner' + - !ruby/object:Provider::Terraform::Examples + name: 'bigquery_dataset_external_reference_aws' + primary_resource_id: 'dataset' + min_version: beta + skip_docs: true + vars: + dataset_id: 'example_dataset' + - !ruby/object:Provider::Terraform::Examples + name: 'bigquery_dataset_external_reference_aws_docs' + primary_resource_id: 
'dataset' + min_version: beta + skip_test: true + vars: + dataset_id: 'example_dataset' virtual_fields: - !ruby/object:Api::Type::Boolean name: 'delete_contents_on_destroy' @@ -291,6 +305,26 @@ properties: description: | A hash of the resource. output: true + - !ruby/object:Api::Type::NestedObject + name: 'externalDatasetReference' + description: | + Information about the external metadata storage where the dataset is defined. + min_version: beta + immutable: true + properties: + - !ruby/object:Api::Type::String + name: 'externalSource' + description: | + External source that backs this dataset. + required: true + immutable: true + - !ruby/object:Api::Type::String + name: 'connection' + description: | + The connection id that is used to access the externalSource. + Format: projects/{projectId}/locations/{locationId}/connections/{connectionId} + required: true + immutable: true - !ruby/object:Api::Type::String name: 'friendlyName' description: A descriptive name for the dataset diff --git a/mmv1/products/bigquery/product.yaml b/mmv1/products/bigquery/product.yaml index 4738d4232d24..f280ad33d2a1 100644 --- a/mmv1/products/bigquery/product.yaml +++ b/mmv1/products/bigquery/product.yaml @@ -19,5 +19,8 @@ versions: - !ruby/object:Api::Product::Version name: ga base_url: https://bigquery.googleapis.com/bigquery/v2/ + - !ruby/object:Api::Product::Version + name: beta + base_url: https://bigquery.googleapis.com/bigquery/v2/ scopes: - https://www.googleapis.com/auth/bigquery diff --git a/mmv1/templates/terraform/examples/bigquery_dataset_external_reference_aws.tf.erb b/mmv1/templates/terraform/examples/bigquery_dataset_external_reference_aws.tf.erb new file mode 100644 index 000000000000..9018a26c6a78 --- /dev/null +++ b/mmv1/templates/terraform/examples/bigquery_dataset_external_reference_aws.tf.erb @@ -0,0 +1,12 @@ +resource "google_bigquery_dataset" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + dataset_id = "<%= ctx[:vars]['dataset_id'] %>" + 
friendly_name = "test" + description = "This is a test description" + location = "aws-us-east-1" + + external_dataset_reference { + external_source = "aws-glue://arn:aws:glue:us-east-1:772042918353:database/db_other_formats_external" + connection = "projects/bigquerytestdefault/locations/aws-us-east-1/connections/external_test-connection" + } +} diff --git a/mmv1/templates/terraform/examples/bigquery_dataset_external_reference_aws_docs.tf.erb b/mmv1/templates/terraform/examples/bigquery_dataset_external_reference_aws_docs.tf.erb new file mode 100644 index 000000000000..a509fb47c6d1 --- /dev/null +++ b/mmv1/templates/terraform/examples/bigquery_dataset_external_reference_aws_docs.tf.erb @@ -0,0 +1,12 @@ +resource "google_bigquery_dataset" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + dataset_id = "<%= ctx[:vars]['dataset_id'] %>" + friendly_name = "test" + description = "This is a test description" + location = "aws-us-east-1" + + external_dataset_reference { + external_source = "aws-glue://arn:aws:glue:us-east-1:999999999999:database/database" + connection = "projects/project/locations/aws-us-east-1/connections/connection" + } +} From 9b9aa20fc8ac98a3fe9ac350b6c108822fb435d9 Mon Sep 17 00:00:00 2001 From: Swamita Gupta <55314843+swamitagupta@users.noreply.github.com> Date: Thu, 7 Dec 2023 06:25:01 +0530 Subject: [PATCH 20/44] Add VCenter Credentials to Vmwareengine (#9572) --- .../provider/provider_mmv1_resources.go.erb | 3 +- ...google_vmwareengine_vcenter_credentials.go | 90 +++++++++++++++++++ ...esource_vmwareengine_private_cloud_test.go | 24 ++++- ...reengine_vcenter_credentials.html.markdown | 33 +++++++ 4 files changed, 148 insertions(+), 2 deletions(-) create mode 100644 mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_vcenter_credentials.go create mode 100644 mmv1/third_party/terraform/website/docs/d/vmwareengine_vcenter_credentials.html.markdown diff --git 
a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index 80aa24eae9d3..38e02676b2ec 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -208,7 +208,8 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_vmwareengine_network_policy": vmwareengine.DataSourceVmwareengineNetworkPolicy(), "google_vmwareengine_nsx_credentials": vmwareengine.DataSourceVmwareengineNsxCredentials(), "google_vmwareengine_private_cloud": vmwareengine.DataSourceVmwareenginePrivateCloud(), - "google_vmwareengine_subnet": vmwareengine.DataSourceVmwareengineSubnet(), + "google_vmwareengine_subnet": vmwareengine.DataSourceVmwareengineSubnet(), + "google_vmwareengine_vcenter_credentials": vmwareengine.DataSourceVmwareengineVcenterCredentials(), // ####### END handwritten datasources ########### } diff --git a/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_vcenter_credentials.go b/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_vcenter_credentials.go new file mode 100644 index 000000000000..45fb9b71a6bd --- /dev/null +++ b/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_vcenter_credentials.go @@ -0,0 +1,90 @@ +package vmwareengine + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceVmwareengineVcenterCredentials() *schema.Resource { + return &schema.Resource{ + Read: dataSourceVmwareengineVcenterCredentialsRead, + Schema: map[string]*schema.Schema{ + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name of the private cloud which contains 
vcenter. +Resource names are schemeless URIs that follow the conventions in https://cloud.google.com/apis/design/resource_names. +For example: projects/my-project/locations/us-west1-a/privateClouds/my-cloud`, + }, + "username": { + Type: schema.TypeString, + Computed: true, + Description: `Initial username.`, + }, + "password": { + Type: schema.TypeString, + Computed: true, + Description: `Initial password.`, + }, + }, + } +} + +func dataSourceVmwareengineVcenterCredentialsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{VmwareengineBasePath}}{{parent}}:showVcenterCredentials") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429QuotaError}, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("VmwareengineVcenterCredentials %q", d.Id())) + } + + if err := d.Set("username", flattenVmwareengineVcenterCredentailsUsername(res["username"], d, config)); err != nil { + return fmt.Errorf("Error reading VcenterCredentails: %s", err) + } + if err := d.Set("password", flattenVmwareengineVcenterCredentailsPassword(res["password"], d, config)); err != nil { + return fmt.Errorf("Error reading VcenterCredentails: %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "{{parent}}:vcenter-credentials") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return nil 
+} + +func flattenVmwareengineVcenterCredentailsUsername(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenVmwareengineVcenterCredentailsPassword(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} diff --git a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go index 54ede7d89284..9a189bd1c378 100644 --- a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go +++ b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go @@ -31,6 +31,7 @@ func TestAccVmwareenginePrivateCloud_vmwareEnginePrivateCloudUpdate(t *testing.T Check: resource.ComposeTestCheckFunc( acctest.CheckDataSourceStateMatchesResourceStateWithIgnores("data.google_vmwareengine_private_cloud.ds", "google_vmwareengine_private_cloud.vmw-engine-pc", map[string]struct{}{}), testAccCheckGoogleVmwareengineNsxCredentialsMeta("data.google_vmwareengine_nsx_credentials.nsx-ds"), + testAccCheckGoogleVmwareengineVcenterCredentialsMeta("data.google_vmwareengine_vcenter_credentials.vcenter-ds"), ), }, { @@ -99,9 +100,12 @@ data "google_vmwareengine_private_cloud" "ds" { ] } -# NSX Credentials is a child datasource of PC and is included in the PC test due to the high deployment time involved in the Creation and deletion of a PC +# NSX and Vcenter Credentials are child datasources of PC and are included in the PC test due to the high deployment time involved in the Creation and deletion of a PC data "google_vmwareengine_nsx_credentials" "nsx-ds" { parent = google_vmwareengine_private_cloud.vmw-engine-pc + +data "google_vmwareengine_vcenter_credentials" "vcenter-ds" { + parent = google_vmwareengine_private_cloud.vmw-engine-pc.id } `, context) @@ -125,6 +129,24 @@ func 
testAccCheckGoogleVmwareengineNsxCredentialsMeta(n string) resource.TestChe } } +func testAccCheckGoogleVmwareengineVcenterCredentialsMeta(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Can't find vcenter credentials data source: %s", n) + } + _, ok = rs.Primary.Attributes["username"] + if !ok { + return fmt.Errorf("can't find 'username' attribute in data source: %s", n) + } + _, ok = rs.Primary.Attributes["password"] + if !ok { + return fmt.Errorf("can't find 'password' attribute in data source: %s", n) + } + return nil + } +} + func testAccCheckVmwareenginePrivateCloudDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { for name, rs := range s.RootModule().Resources { diff --git a/mmv1/third_party/terraform/website/docs/d/vmwareengine_vcenter_credentials.html.markdown b/mmv1/third_party/terraform/website/docs/d/vmwareengine_vcenter_credentials.html.markdown new file mode 100644 index 000000000000..b2050d38c910 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/vmwareengine_vcenter_credentials.html.markdown @@ -0,0 +1,33 @@ +--- +subcategory: "Cloud VMware Engine" +description: |- + Get Vcenter Credentials of a Private Cloud. +--- + +# google\_vmwareengine\_vcenter_credentials + +Use this data source to get Vcenter credentials for a Private Cloud. + +To get more information about private cloud Vcenter credentials, see: +* [API documentation](https://cloud.google.com/vmware-engine/docs/reference/rest/v1/projects.locations.privateClouds/showVcenterCredentials) + +## Example Usage + +```hcl +data "google_vmwareengine_vcenter_credentials" "ds" { + parent = "projects/my-project/locations/us-west1-a/privateClouds/my-cloud" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `parent` - (Required) The resource name of the private cloud which contains the Vcenter. 
+ +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `username` - The username of the Vcenter Credential. +* `password` - The password of the Vcenter Credential. \ No newline at end of file From 0782e1a9c6d73bbf636bef7e97e01e92cd99d226 Mon Sep 17 00:00:00 2001 From: Mauricio Alvarez Leon <65101411+BBBmau@users.noreply.github.com> Date: Wed, 6 Dec 2023 16:56:02 -0800 Subject: [PATCH 21/44] Add `saml` field to `google_iam_workload_identity_pool_provider` resource (#9061) * add saml block to workloadIdentityPoolProvider resource * add tf examples for saml tests * add newline in yaml * lintcheck * fix saml tests * add metadata.xml file for testing * apply to correct iam service * change iam beta endpoint from v1beta to v1 --- .../iambeta/WorkloadIdentityPoolProvider.yaml | 32 +++++++++++++++++-- mmv1/products/iambeta/product.yaml | 2 +- ...d_identity_pool_provider_saml_basic.tf.erb | 16 ++++++++++ ...ad_identity_pool_provider_saml_full.tf.erb | 19 +++++++++++ .../iambeta/test-fixtures/metadata.xml | 14 ++++++++ 5 files changed, 80 insertions(+), 3 deletions(-) create mode 100644 mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_saml_basic.tf.erb create mode 100644 mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_saml_full.tf.erb create mode 100644 mmv1/third_party/terraform/services/iambeta/test-fixtures/metadata.xml diff --git a/mmv1/products/iambeta/WorkloadIdentityPoolProvider.yaml b/mmv1/products/iambeta/WorkloadIdentityPoolProvider.yaml index f5eea3482da5..808f8ab45efc 100644 --- a/mmv1/products/iambeta/WorkloadIdentityPoolProvider.yaml +++ b/mmv1/products/iambeta/WorkloadIdentityPoolProvider.yaml @@ -53,6 +53,18 @@ examples: vars: workload_identity_pool_id: 'example-pool' workload_identity_pool_provider_id: 'example-prvdr' + - !ruby/object:Provider::Terraform::Examples + name: 'iam_workload_identity_pool_provider_saml_basic' + primary_resource_id: 
'example' + vars: + workload_identity_pool_id: 'example-pool' + workload_identity_pool_provider_id: 'example-prvdr' + - !ruby/object:Provider::Terraform::Examples + name: 'iam_workload_identity_pool_provider_saml_full' + primary_resource_id: 'example' + vars: + workload_identity_pool_id: 'example-pool' + workload_identity_pool_provider_id: 'example-prvdr' - !ruby/object:Provider::Terraform::Examples name: 'iam_workload_identity_pool_provider_oidc_upload_key' primary_resource_id: 'example' @@ -205,10 +217,11 @@ properties: name: aws description: An Amazon Web Services identity provider. Not compatible with the property - oidc. + oidc or saml. exactly_one_of: - aws - oidc + - saml properties: - !ruby/object:Api::Type::String name: accountId @@ -218,10 +231,11 @@ properties: name: oidc description: An OpenId Connect 1.0 identity provider. Not compatible with the property - aws. + aws or saml. exactly_one_of: - aws - oidc + - saml update_mask_fields: - 'oidc.allowed_audiences' - 'oidc.issuer_uri' @@ -274,3 +288,17 @@ properties: } ``` required: false + - !ruby/object:Api::Type::NestedObject + name: saml + description: + An SAML 2.0 identity provider. Not compatible with the property + oidc or aws. + exactly_one_of: + - aws + - oidc + - saml + properties: + - !ruby/object:Api::Type::String + name: idpMetadataXml + description: SAML Identity provider configuration metadata xml doc. 
+ required: true diff --git a/mmv1/products/iambeta/product.yaml b/mmv1/products/iambeta/product.yaml index 55786f97e1ec..5d92e8bd8737 100644 --- a/mmv1/products/iambeta/product.yaml +++ b/mmv1/products/iambeta/product.yaml @@ -18,7 +18,7 @@ legacy_name: iam versions: - !ruby/object:Api::Product::Version name: beta - base_url: https://iam.googleapis.com/v1beta/ + base_url: https://iam.googleapis.com/v1/ - !ruby/object:Api::Product::Version name: ga base_url: https://iam.googleapis.com/v1/ diff --git a/mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_saml_basic.tf.erb b/mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_saml_basic.tf.erb new file mode 100644 index 000000000000..6ec26e598c90 --- /dev/null +++ b/mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_saml_basic.tf.erb @@ -0,0 +1,16 @@ +resource "google_iam_workload_identity_pool" "pool" { + workload_identity_pool_id = "<%= ctx[:vars]["workload_identity_pool_id"] %>" +} + +resource "google_iam_workload_identity_pool_provider" "<%= ctx[:primary_resource_id] %>" { + workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id + workload_identity_pool_provider_id = "<%= ctx[:vars]["workload_identity_pool_provider_id"] %>" + attribute_mapping = { + "google.subject" = "assertion.arn" + "attribute.aws_account" = "assertion.account" + "attribute.environment" = "assertion.arn.contains(\":instance-profile/Production\") ? 
\"prod\" : \"test\"" + } + saml { + idp_metadata_xml = file("test-fixtures/metadata.xml") + } +} diff --git a/mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_saml_full.tf.erb b/mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_saml_full.tf.erb new file mode 100644 index 000000000000..7bfa6bea5054 --- /dev/null +++ b/mmv1/templates/terraform/examples/iam_workload_identity_pool_provider_saml_full.tf.erb @@ -0,0 +1,19 @@ +resource "google_iam_workload_identity_pool" "pool" { + workload_identity_pool_id = "<%= ctx[:vars]["workload_identity_pool_id"] %>" +} + +resource "google_iam_workload_identity_pool_provider" "<%= ctx[:primary_resource_id] %>" { + workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id + workload_identity_pool_provider_id = "<%= ctx[:vars]["workload_identity_pool_provider_id"] %>" + display_name = "Name of provider" + description = "SAML 2.0 identity pool provider for automated test" + disabled = true + attribute_mapping = { + "google.subject" = "assertion.arn" + "attribute.aws_account" = "assertion.account" + "attribute.environment" = "assertion.arn.contains(\":instance-profile/Production\") ? 
\"prod\" : \"test\"" + } + saml { + idp_metadata_xml = file("test-fixtures/metadata.xml") + } +} diff --git a/mmv1/third_party/terraform/services/iambeta/test-fixtures/metadata.xml b/mmv1/third_party/terraform/services/iambeta/test-fixtures/metadata.xml new file mode 100644 index 000000000000..ce9acb1dbeb3 --- /dev/null +++ b/mmv1/third_party/terraform/services/iambeta/test-fixtures/metadata.xml @@ -0,0 +1,14 @@ + + + + + + + + MIIC3jCCAcagAwIBAgIBATANBgkqhkiG9w0BAQsFADAeMRwwGgYDVQQDExNNeSBTZXJ2aWNlIFByb3ZpZGVyMB4XDTIzMTEzMDAwMzc1NVoXDTM4MTEyNjAwMzc1NVowHjEcMBoGA1UEAxMTTXkgU2VydmljZSBQcm92aWRlcjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM1W92rSmU8MbqNhIL6E0OEg3H0t65wqEMqg8r99yjZq/KW9ps6PoO4MZVIFLAMRtXvoTrVv6oPYCoc1T5u46Wl16qldGGRWk44Lr03L5xRqhKAVPfHMGL3HO0aaSnrVjKuFbGgjoMxJRSHHmU2Q6FU5AaRP2zodnkNhsZZt+x4dwKkow94uvTIQc1avdfCqKB5PeS2+2/oeFoNc78Pbnok+wUKZbhHwzYQdmSZ8NJnsIDpqf1IGcXyMMEqGkHmAa3O3sme4gYkl/0MbHcCSFwyaW+gUFy0uHz/amTiiZNoJ+0TPoNLz0XLXYv7ACxz5mTa/DaMvZ1zRsKi6wOzoSOUCAwEAAaMnMCUwDgYDVR0PAQH/BAQDAgKEMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA0GCSqGSIb3DQEBCwUAA4IBAQBFyfbJBaKuHSNd4Kx73ozSV2no96ctY3ZciUjCmUArLeqg0qQwkwXP6AoOd5bwIpKAmgHJgyv7T6GGHA+w9qNXuQxU86ph3gPRlYCB3+pnaiDq/iUECE09pleQJr//Am5x2Z1Bc8itMUPRuVsU873DASOFho3HZykmgS6x4JwAqCO2dBNoYHi+N9DsbQ9MatNdUOvrYgCTzbwRuwsDwLno/OSFQYTLqZVTW+NuEL/Axtx/yu9wKvuTzcD2MqPHaisy35PkPbJLsCauFJn8CAC4U5xKEkwsaumANeMZzzvAv8J2SIyjagLI+iF06AmCQNeytSvYxptxt7hhTg6uJVxS + + + + + + \ No newline at end of file From d6ce7020cd1e2a490a93c91ccc643531dba8bae7 Mon Sep 17 00:00:00 2001 From: Mauricio Alvarez Leon <65101411+BBBmau@users.noreply.github.com> Date: Thu, 7 Dec 2023 05:17:54 -0800 Subject: [PATCH 22/44] add `numeric_id` field into `google_compute_network` resource (#9473) * add numberic_id field * add flattener, encoders, and id field for numeric_id use * WIP: add numberId test * add value checks for id and numeric_id * add strconv * use decoder to store id into numericId after API request * remove network check and add suffix variable * 
add endlines * typo * Updated test to explicitly check numeric_id and id with ResourceAttr * update regex to be more strict on numeric_id check --- mmv1/products/compute/Network.yaml | 8 +++++ .../terraform/decoders/compute_network.go.erb | 2 ++ .../terraform/encoders/compute_network.go.erb | 2 ++ .../update_encoder/compute_network.go.erb | 2 ++ .../resource_compute_network_test.go.erb | 30 +++++++++++++++++++ 5 files changed, 44 insertions(+) create mode 100644 mmv1/templates/terraform/decoders/compute_network.go.erb create mode 100644 mmv1/templates/terraform/encoders/compute_network.go.erb create mode 100644 mmv1/templates/terraform/update_encoder/compute_network.go.erb diff --git a/mmv1/products/compute/Network.yaml b/mmv1/products/compute/Network.yaml index 4c35f622d19a..895c5a5105df 100644 --- a/mmv1/products/compute/Network.yaml +++ b/mmv1/products/compute/Network.yaml @@ -70,6 +70,9 @@ virtual_fields: If set to `true`, default routes (`0.0.0.0/0`) will be deleted immediately after network creation. Defaults to `false`. custom_code: !ruby/object:Provider::Terraform::CustomCode + decoder: templates/terraform/decoders/compute_network.go.erb + encoder: templates/terraform/encoders/compute_network.go.erb + update_encoder: templates/terraform/update_encoder/compute_network.go.erb post_create: templates/terraform/post_create/compute_network_delete_default_route.erb properties: - !ruby/object:Api::Type::String @@ -100,6 +103,11 @@ properties: required: true validation: !ruby/object:Provider::Terraform::Validation function: 'verify.ValidateGCEName' + - !ruby/object:Api::Type::String + name: 'numericId' + description: | + The unique identifier for the resource. This identifier is defined by the server. 
+ output: true - !ruby/object:Api::Type::Boolean name: 'autoCreateSubnetworks' description: | diff --git a/mmv1/templates/terraform/decoders/compute_network.go.erb b/mmv1/templates/terraform/decoders/compute_network.go.erb new file mode 100644 index 000000000000..d1dfb685ae0b --- /dev/null +++ b/mmv1/templates/terraform/decoders/compute_network.go.erb @@ -0,0 +1,2 @@ +res["numericId"] = res["id"] // stores unique id into numericId attribute before it's changed to path format +return res, nil diff --git a/mmv1/templates/terraform/encoders/compute_network.go.erb b/mmv1/templates/terraform/encoders/compute_network.go.erb new file mode 100644 index 000000000000..084107f2772e --- /dev/null +++ b/mmv1/templates/terraform/encoders/compute_network.go.erb @@ -0,0 +1,2 @@ +delete(obj, "numeric_id") // Field doesn't exist in the API +return obj, nil diff --git a/mmv1/templates/terraform/update_encoder/compute_network.go.erb b/mmv1/templates/terraform/update_encoder/compute_network.go.erb new file mode 100644 index 000000000000..084107f2772e --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/compute_network.go.erb @@ -0,0 +1,2 @@ +delete(obj, "numeric_id") // Field doesn't exist in the API +return obj, nil diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_network_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_network_test.go.erb index 38f9ff58d49b..ce9d2f2406aa 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_network_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_network_test.go.erb @@ -3,8 +3,10 @@ package compute_test import ( "fmt" + "regexp" "testing" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" @@ -106,6 +108,34 @@ func 
TestAccComputeNetwork_routingModeAndUpdate(t *testing.T) { }) } +func TestAccComputeNetwork_numericId(t *testing.T) { + t.Parallel() + suffixName := acctest.RandString(t, 10) + networkName := fmt.Sprintf("tf-test-network-basic-%s", suffixName) + projectId := envvar.GetTestProjectFromEnv() + networkId := fmt.Sprintf("projects/%v/global/networks/%v", projectId, networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetwork_basic(suffixName), + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("google_compute_network.bar", "numeric_id",regexp.MustCompile("^\\d{1,}$")), + resource.TestCheckResourceAttr("google_compute_network.bar", "id", networkId), + ), + }, + { + ResourceName: "google_compute_network.bar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccComputeNetwork_default_routing_mode(t *testing.T) { t.Parallel() From 7ebfe21ca18a66c8326218a496858b9f4a8aea28 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Thu, 7 Dec 2023 13:25:48 +0000 Subject: [PATCH 23/44] Update regex in `TestAccContainerCluster_errorNoClusterCreated` acc test (#9594) --- .../services/container/resource_container_cluster_test.go.erb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb index 2d04467b5c0f..c79ed5c9779a 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb @@ -3705,7 +3705,7 @@ func TestAccContainerCluster_errorNoClusterCreated(t 
*testing.T) { Steps: []resource.TestStep{ { Config: testAccContainerCluster_withInvalidLocation("wonderland"), - ExpectError: regexp.MustCompile(`Location "wonderland" does not exist.`), + ExpectError: regexp.MustCompile(`(Location "wonderland" does not exist)|(Permission denied on 'locations\/wonderland' \(or it may not exist\))`), }, }, }) From 4ca0c3ca2e512950578ecc500cba7e82bc5a13e6 Mon Sep 17 00:00:00 2001 From: Akira Noda <61897166+tsugumi-sys@users.noreply.github.com> Date: Fri, 8 Dec 2023 00:49:00 +0900 Subject: [PATCH 24/44] Doc: Adding description of unique constraints on display_name argument (#9589) --- mmv1/products/datacatalog/Taxonomy.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/mmv1/products/datacatalog/Taxonomy.yaml b/mmv1/products/datacatalog/Taxonomy.yaml index e348865410aa..c7016b11cf8d 100644 --- a/mmv1/products/datacatalog/Taxonomy.yaml +++ b/mmv1/products/datacatalog/Taxonomy.yaml @@ -66,6 +66,7 @@ properties: name: 'displayName' description: | User defined name of this taxonomy. + The taxonomy display name must be unique within an organization. It must: contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. 
From ac1397e126a49535fac419ede1524ef3eb464556 Mon Sep 17 00:00:00 2001 From: Sam Eiderman Date: Thu, 7 Dec 2023 19:01:03 +0200 Subject: [PATCH 25/44] Do not replace GKE's NodePool resource on machineType/diskType/diskSizeGb change (#9575) --- .../services/container/node_config.go.erb | 3 - .../resource_container_node_pool.go.erb | 33 +++++++++ .../resource_container_node_pool_test.go.erb | 68 +++++++++++++++++++ 3 files changed, 101 insertions(+), 3 deletions(-) diff --git a/mmv1/third_party/terraform/services/container/node_config.go.erb b/mmv1/third_party/terraform/services/container/node_config.go.erb index b5ce9e2e29bc..af3492f29413 100644 --- a/mmv1/third_party/terraform/services/container/node_config.go.erb +++ b/mmv1/third_party/terraform/services/container/node_config.go.erb @@ -72,7 +72,6 @@ func schemaNodeConfig() *schema.Schema { Type: schema.TypeInt, Optional: true, Computed: true, - ForceNew: true, ValidateFunc: validation.IntAtLeast(10), Description: `Size of the disk attached to each node, specified in GB. The smallest allowed disk size is 10GB.`, }, @@ -81,7 +80,6 @@ func schemaNodeConfig() *schema.Schema { Type: schema.TypeString, Optional: true, Computed: true, - ForceNew: true, Description: `Type of the disk attached to each node. 
Such as pd-standard, pd-balanced or pd-ssd`, }, @@ -283,7 +281,6 @@ func schemaNodeConfig() *schema.Schema { Type: schema.TypeString, Optional: true, Computed: true, - ForceNew: true, Description: `The name of a Google Compute Engine machine type.`, }, diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb index 3ae142f56f20..a4208ccfd799 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb @@ -1464,6 +1464,39 @@ func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *Node } } + if d.HasChange("node_config.0.disk_size_gb") || + d.HasChange("node_config.0.disk_type") || + d.HasChange("node_config.0.machine_type") { + req := &container.UpdateNodePoolRequest{ + Name: name, + DiskSizeGb: int64(d.Get("node_config.0.disk_size_gb").(int)), + DiskType: d.Get("node_config.0.disk_type").(string), + MachineType: d.Get("node_config.0.machine_type").(string), + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool disk_size_gb/disk_type/machine_type", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated disk disk_size_gb/disk_type/machine_type for Node Pool %s", d.Id()) + } + if d.HasChange(prefix + "node_config.0.taint") { 
req := &container.UpdateNodePoolRequest{ Name: name, diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb index f821b5a5a413..e06180e6c0e9 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb @@ -280,6 +280,42 @@ func TestAccContainerNodePool_withTaintsUpdate(t *testing.T) { }) } +func TestAccContainerNodePool_withMachineAndDiskUpdate(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_basic(cluster, nodePool, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withMachineAndDiskUpdate(cluster, nodePool, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + // autoscaling.# = 0 is equivalent to no autoscaling at all, + // but will still cause an import diff + ImportStateVerifyIgnore: []string{"autoscaling.#", "node_config.0.taint"}, + }, + }, + }) +} + func TestAccContainerNodePool_withReservationAffinity(t *testing.T) { t.Parallel() @@ -2664,6 +2700,38 @@ resource "google_container_node_pool" "np" { `, cluster, networkName, 
subnetworkName, np) } +func testAccContainerNodePool_withMachineAndDiskUpdate(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +provider "google" { + alias = "user-project-override" + user_project_override = true +} +resource "google_container_cluster" "cluster" { + provider = google.user-project-override + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + provider = google.user-project-override + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + + node_config { + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + disk_size_gb = 15 + disk_type = "pd-ssd" + } +} +`, cluster, networkName, subnetworkName, np) +} + func testAccContainerNodePool_withReservationAffinity(cluster, np, networkName, subnetworkName string) string { return fmt.Sprintf(` data "google_container_engine_versions" "central1a" { From 8abd3028dea15486c2d78a52897cc5d617560dcb Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Thu, 7 Dec 2023 09:31:07 -0800 Subject: [PATCH 26/44] Remove activesupport (#9579) * Copy the ActiveSupport camelize code w/ slight modifications * Remove activesupport gem * Simplify camelize * Slightly simplify and also rubocop * Also remove the dependency in tests * Fix tgc --------- Co-authored-by: Riley Karson --- mmv1/Gemfile | 1 - mmv1/Gemfile.lock | 12 ------------ mmv1/api/type.rb | 2 +- mmv1/compiler.rb | 1 - mmv1/google/extensions.rb | 11 +++++++++++ mmv1/google/string_utils.rb | 18 ++++++++++++++++++ mmv1/provider/terraform_tgc.rb | 1 + mmv1/spec/compiler_spec.rb | 1 - .../terraform/datasource_iam.html.markdown.erb | 2 +- .../terraform/resource_iam.html.markdown.erb | 2 +- 10 files changed, 33 insertions(+), 18 deletions(-) diff --git a/mmv1/Gemfile b/mmv1/Gemfile index 9fbcb00be24e..b0b4949b38d6 100644 --- a/mmv1/Gemfile 
+++ b/mmv1/Gemfile @@ -1,6 +1,5 @@ source 'https://rubygems.org' -gem 'activesupport' gem 'binding_of_caller' gem 'openapi_parser', '~> 1.0.0' gem 'parallel' diff --git a/mmv1/Gemfile.lock b/mmv1/Gemfile.lock index 5c7e999f4191..f0bb3296cd4f 100644 --- a/mmv1/Gemfile.lock +++ b/mmv1/Gemfile.lock @@ -1,22 +1,13 @@ GEM remote: https://rubygems.org/ specs: - activesupport (7.0.4.2) - concurrent-ruby (~> 1.0, >= 1.0.2) - i18n (>= 1.6, < 2) - minitest (>= 5.1) - tzinfo (~> 2.0) ast (2.4.2) binding_of_caller (1.0.0) debug_inspector (>= 0.0.1) - concurrent-ruby (1.2.0) debug_inspector (1.1.0) diff-lcs (1.5.0) - i18n (1.12.0) - concurrent-ruby (~> 1.0) json (2.6.3) metaclass (0.0.4) - minitest (5.17.0) mocha (1.3.0) metaclass (~> 0.0.1) openapi_parser (1.0.0) @@ -53,15 +44,12 @@ GEM rubocop-ast (1.24.1) parser (>= 3.1.1.0) ruby-progressbar (1.11.0) - tzinfo (2.0.6) - concurrent-ruby (~> 1.0) unicode-display_width (2.4.2) PLATFORMS ruby DEPENDENCIES - activesupport binding_of_caller mocha (~> 1.3.0) openapi_parser (~> 1.0.0) diff --git a/mmv1/api/type.rb b/mmv1/api/type.rb index d9bac98786b2..90487dd8808d 100644 --- a/mmv1/api/type.rb +++ b/mmv1/api/type.rb @@ -425,7 +425,7 @@ def min_version end def exact_version - return nil if @exact_version.nil? || @exact_version.blank? + return nil if @exact_version.nil? || @exact_version.empty? @__resource.__product.version_obj(@exact_version) end diff --git a/mmv1/compiler.rb b/mmv1/compiler.rb index adfa9655034e..1f2cdc5f82c3 100755 --- a/mmv1/compiler.rb +++ b/mmv1/compiler.rb @@ -21,7 +21,6 @@ # generation. 
ENV['TZ'] = 'UTC' -require 'active_support/inflector' require 'api/compiler' require 'openapi_generate/parser' require 'google/logger' diff --git a/mmv1/google/extensions.rb b/mmv1/google/extensions.rb index 8ba95cb3b744..4cd9d0718ac7 100644 --- a/mmv1/google/extensions.rb +++ b/mmv1/google/extensions.rb @@ -37,4 +37,15 @@ def plural def title Google::StringUtils.title(self) end + + def camelize(first_letter = :upper) + case first_letter + when :upper + Google::StringUtils.camelize(self, true) + when :lower + Google::StringUtils.camelize(self, false) + else + raise ArgumentError, 'Invalid option, use either :upper or :lower.' + end + end end diff --git a/mmv1/google/string_utils.rb b/mmv1/google/string_utils.rb index 33575771fe99..0a3d25730c6d 100644 --- a/mmv1/google/string_utils.rb +++ b/mmv1/google/string_utils.rb @@ -74,5 +74,23 @@ def self.plural(source) "#{source}s" end + + # Slimmed down version of ActiveSupport::Inflector code + def self.camelize(term, uppercase_first_letter) + acronyms_camelize_regex = /^(?:(?=a)b(?=\b|[A-Z_])|\w)/ + + string = term.to_s + string = if uppercase_first_letter + string.sub(/^[a-z\d]*/) { |match| match.capitalize! || match } + else + string.sub(acronyms_camelize_regex) { |match| match.downcase! || match } + end + # handle snake case + string.gsub!(/(?:_)([a-z\d]*)/i) do + word = ::Regexp.last_match(1) + word.capitalize! || word + end + string + end end end diff --git a/mmv1/provider/terraform_tgc.rb b/mmv1/provider/terraform_tgc.rb index e8fad32fe852..9f28816857de 100644 --- a/mmv1/provider/terraform_tgc.rb +++ b/mmv1/provider/terraform_tgc.rb @@ -13,6 +13,7 @@ require 'provider/terraform_oics' require 'fileutils' +require 'set' module Provider # Code generator for a library converting terraform state to gcp objects. 
diff --git a/mmv1/spec/compiler_spec.rb b/mmv1/spec/compiler_spec.rb index bc57c6e6a9ae..e3a8627f0599 100644 --- a/mmv1/spec/compiler_spec.rb +++ b/mmv1/spec/compiler_spec.rb @@ -13,7 +13,6 @@ require 'spec_helper' require 'api/compiler' -require 'active_support/inflector' describe Api::Compiler do context 'should fail if file does not exist' do diff --git a/mmv1/templates/terraform/datasource_iam.html.markdown.erb b/mmv1/templates/terraform/datasource_iam.html.markdown.erb index 39fd93d86128..cd40438e57fe 100644 --- a/mmv1/templates/terraform/datasource_iam.html.markdown.erb +++ b/mmv1/templates/terraform/datasource_iam.html.markdown.erb @@ -99,7 +99,7 @@ The following arguments are supported: * `<%= param.name.underscore -%>` - (Required) <%= param.description -%> Used to find the parent resource to bind the IAM policy to <% end -%> <% end -%> -<% if object.iam_policy.base_url.present? -%> +<% if !object.iam_policy.base_url.nil? -%> <% if object.iam_policy.base_url.include?("{{project}}") -%> <%# The following new line allow for project to be bullet-formatted properly. -%> diff --git a/mmv1/templates/terraform/resource_iam.html.markdown.erb b/mmv1/templates/terraform/resource_iam.html.markdown.erb index 2663feb0ae0c..0d92349e0257 100644 --- a/mmv1/templates/terraform/resource_iam.html.markdown.erb +++ b/mmv1/templates/terraform/resource_iam.html.markdown.erb @@ -228,7 +228,7 @@ The following arguments are supported: * `<%= param.name.underscore -%>` - (Required) <%= param.description -%> Used to find the parent resource to bind the IAM policy to <% end -%> <% end -%> -<% if object.iam_policy.base_url.present? -%> +<% if !object.iam_policy.base_url.nil? -%> <% if object.iam_policy.base_url.include?("{{project}}") -%> <%# The following new line allow for project to be bullet-formatted properly. 
-%> From ec7eec622d9aefb7c3d490dc9c3df02209cd8ea0 Mon Sep 17 00:00:00 2001 From: Nick Elliot Date: Thu, 7 Dec 2023 10:14:28 -0800 Subject: [PATCH 27/44] Revert " add support for IAM Group authentication to google_sql_user (#9578)" (#9595) This reverts commit 05c4410c0e599f33ab255e3820187855c82c7739. --- .../services/sql/resource_sql_user.go | 6 ++---- .../services/sql/resource_sql_user_test.go | 11 ---------- .../website/docs/r/sql_user.html.markdown | 21 +------------------ 3 files changed, 3 insertions(+), 35 deletions(-) diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_user.go b/mmv1/third_party/terraform/services/sql/resource_sql_user.go index 15bf8061b05d..599e55df39f7 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_user.go +++ b/mmv1/third_party/terraform/services/sql/resource_sql_user.go @@ -102,10 +102,8 @@ func ResourceSqlUser() *schema.Resource { ForceNew: true, DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("BUILT_IN"), Description: `The user type. It determines the method to authenticate the user during login. - The default is the database's built-in user type. Flags include "BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", - "CLOUD_IAM_GROUP", "CLOUD_IAM_GROUP_USER" or "CLOUD_IAM_GROUP_SERVICE_ACCOUNT".`, - ValidateFunc: validation.StringInSlice([]string{"BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", - "CLOUD_IAM_GROUP", "CLOUD_IAM_GROUP_USER", "CLOUD_IAM_GROUP_SERVICE_ACCOUNT", ""}, false), + The default is the database's built-in user type. 
Flags include "BUILT_IN", "CLOUD_IAM_USER", or "CLOUD_IAM_SERVICE_ACCOUNT".`, + ValidateFunc: validation.StringInSlice([]string{"BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", ""}, false), }, "sql_server_user_details": { Type: schema.TypeList, diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_user_test.go b/mmv1/third_party/terraform/services/sql/resource_sql_user_test.go index 9091ef56a534..efcf0d5f5962 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_user_test.go +++ b/mmv1/third_party/terraform/services/sql/resource_sql_user_test.go @@ -26,7 +26,6 @@ func TestAccSqlUser_mysql(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckGoogleSqlUserExists(t, "google_sql_user.user1"), testAccCheckGoogleSqlUserExists(t, "google_sql_user.user2"), - testAccCheckGoogleSqlUserExists(t, "google_sql_user.user3"), ), }, { @@ -35,7 +34,6 @@ func TestAccSqlUser_mysql(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckGoogleSqlUserExists(t, "google_sql_user.user1"), testAccCheckGoogleSqlUserExists(t, "google_sql_user.user2"), - testAccCheckGoogleSqlUserExists(t, "google_sql_user.user3"), ), }, { @@ -313,15 +311,6 @@ resource "google_sql_user" "user2" { instance = google_sql_database_instance.instance.name host = "gmail.com" password = "hunter2" - type = "CLOUD_IAM_USER" -} - -resource "google_sql_user" "user3" { - name = "admin" - instance = google_sql_database_instance.instance.name - host = "gmail.com" - password = "hunter3" - type = "CLOUD_IAM_GROUP" } `, instance, password) } diff --git a/mmv1/third_party/terraform/website/docs/r/sql_user.html.markdown b/mmv1/third_party/terraform/website/docs/r/sql_user.html.markdown index 0072e8aa762b..9b96d4463a22 100644 --- a/mmv1/third_party/terraform/website/docs/r/sql_user.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/sql_user.html.markdown @@ -72,24 +72,6 @@ resource "google_sql_user" "iam_service_account_user" { instance = 
google_sql_database_instance.main.name type = "CLOUD_IAM_SERVICE_ACCOUNT" } - -resource "google_sql_user" "iam_group" { - name = "group1@example.com" - instance = google_sql_database_instance.main.name - type = "CLOUD_IAM_GROUP" -} - -resource "google_sql_user" "iam_group_user" { - name = "group_user1@example.com" - instance = google_sql_database_instance.main.name - type = "CLOUD_IAM_GROUP_USER" -} - -resource "google_sql_user" "iam_group_service_account_user" { - name = "my-service-account@example.iam.gserviceaccount.com" - instance = google_sql_database_instance.main.name - type = "CLOUD_IAM_GROUP_SERVICE_ACCOUNT" -} ``` ## Argument Reference @@ -109,8 +91,7 @@ The following arguments are supported: * `type` - (Optional) The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type. Flags - include "BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_SERVICE_ACCOUNT", - "CLOUD_IAM_GROUP", "CLOUD_IAM_GROUP_USER" or "CLOUD_IAM_GROUP_SERVICE_ACCOUNT". + include "BUILT_IN", "CLOUD_IAM_USER", or "CLOUD_IAM_SERVICE_ACCOUNT". * `deletion_policy` - (Optional) The deletion policy for the user. Setting `ABANDON` allows the resource to be abandoned rather than deleted. This is useful From 4153866aaf018b769837b565a1088095fb25ae61 Mon Sep 17 00:00:00 2001 From: Alex Coomans Date: Thu, 7 Dec 2023 12:06:18 -0800 Subject: [PATCH 28/44] Add remove_instance_on_destroy option to per-compute instance config resources (#9588) It's a bit counterintuitive that creating a per-instance config in an IGM spins up an instance but destroying it leaves the instance behind. Also fixed a bug related to the operation to detach the disk from the instance failing due to the instance having been deleted. Other than the tests for the new per-instance config behavior, I was unable to devise an isolated test to trigger this issue. Fixes hashicorp/terraform-provider-google#9042 & hashicorp/terraform-provider-google#16621. 
--- mmv1/products/compute/PerInstanceConfig.yaml | 11 +- .../compute/RegionPerInstanceConfig.yaml | 11 +- .../custom_delete/per_instance_config.go.erb | 35 ++- .../region_per_instance_config.go.erb | 34 ++- .../compute_per_instance_config.go.erb | 3 - .../terraform/pre_delete/detach_disk.erb | 3 +- ...ce_compute_per_instance_config_test.go.erb | 222 +++++++++++++++++- ...ute_region_per_instance_config_test.go.erb | 181 ++++++++++++++ .../services/compute/stateful_mig_polling.go | 123 ++++++++++ .../terraform/tpgresource/common_operation.go | 2 +- 10 files changed, 598 insertions(+), 27 deletions(-) delete mode 100644 mmv1/templates/terraform/pre_delete/compute_per_instance_config.go.erb diff --git a/mmv1/products/compute/PerInstanceConfig.yaml b/mmv1/products/compute/PerInstanceConfig.yaml index 2c5d8e494298..fd37ee09d9fe 100644 --- a/mmv1/products/compute/PerInstanceConfig.yaml +++ b/mmv1/products/compute/PerInstanceConfig.yaml @@ -100,8 +100,18 @@ virtual_fields: - :REFRESH - :NONE default_value: :REPLACE + - !ruby/object:Api::Type::Boolean + name: 'remove_instance_on_destroy' + conflicts: + - remove_instance_state_on_destroy + description: | + When true, deleting this config will immediately remove the underlying instance. + When false, deleting this config will use the behavior as determined by remove_instance_on_destroy. + default_value: false - !ruby/object:Api::Type::Boolean name: 'remove_instance_state_on_destroy' + conflicts: + - remove_instance_on_destroy description: | When true, deleting this config will immediately remove any specified state from the underlying instance. When false, deleting this config will *not* immediately remove any state from the underlying instance. 
@@ -110,7 +120,6 @@ virtual_fields: custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: templates/terraform/encoders/compute_per_instance_config.go.erb update_encoder: templates/terraform/update_encoder/compute_per_instance_config.go.erb - pre_delete: templates/terraform/pre_delete/compute_per_instance_config.go.erb post_update: templates/terraform/post_update/compute_per_instance_config.go.erb custom_delete: templates/terraform/custom_delete/per_instance_config.go.erb parameters: diff --git a/mmv1/products/compute/RegionPerInstanceConfig.yaml b/mmv1/products/compute/RegionPerInstanceConfig.yaml index 710864fcdaed..307756f20d38 100644 --- a/mmv1/products/compute/RegionPerInstanceConfig.yaml +++ b/mmv1/products/compute/RegionPerInstanceConfig.yaml @@ -101,8 +101,18 @@ virtual_fields: - :REFRESH - :NONE default_value: :REPLACE + - !ruby/object:Api::Type::Boolean + name: 'remove_instance_on_destroy' + conflicts: + - remove_instance_state_on_destroy + description: | + When true, deleting this config will immediately remove the underlying instance. + When false, deleting this config will use the behavior as determined by remove_instance_on_destroy. + default_value: false - !ruby/object:Api::Type::Boolean name: 'remove_instance_state_on_destroy' + conflicts: + - remove_instance_on_destroy description: | When true, deleting this config will immediately remove any specified state from the underlying instance. When false, deleting this config will *not* immediately remove any state from the underlying instance. 
@@ -111,7 +121,6 @@ virtual_fields: custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: templates/terraform/encoders/compute_per_instance_config.go.erb update_encoder: templates/terraform/update_encoder/compute_per_instance_config.go.erb - pre_delete: templates/terraform/pre_delete/compute_per_instance_config.go.erb post_update: templates/terraform/post_update/compute_region_per_instance_config.go.erb custom_delete: templates/terraform/custom_delete/region_per_instance_config.go.erb parameters: diff --git a/mmv1/templates/terraform/custom_delete/per_instance_config.go.erb b/mmv1/templates/terraform/custom_delete/per_instance_config.go.erb index ce069c2ff827..71a62d09cd76 100644 --- a/mmv1/templates/terraform/custom_delete/per_instance_config.go.erb +++ b/mmv1/templates/terraform/custom_delete/per_instance_config.go.erb @@ -10,14 +10,31 @@ transport_tpg.MutexStore.Lock(lockName) defer transport_tpg.MutexStore.Unlock(lockName) - url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/deletePerInstanceConfigs") + var url string + if d.Get("remove_instance_on_destroy").(bool) { + url, err = tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/deleteInstances") + } else { + url, err = tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/deletePerInstanceConfigs") + } if err != nil { return err } var obj map[string]interface{} - obj = map[string]interface{}{ - "names": [1]string{d.Get("name").(string)}, + if d.Get("remove_instance_on_destroy").(bool) { + // Instance name in deleteInstances request must include zone + instanceName, err := tpgresource.ReplaceVars(d, config, "zones/{{zone}}/instances/{{name}}") + if err != nil { + return err + } + + obj = map[string]interface{}{ + "instances": 
[1]string{instanceName}, + } + } else { + obj = map[string]interface{}{ + "names": [1]string{d.Get("name").(string)}, + } } log.Printf("[DEBUG] Deleting PerInstanceConfig %q", d.Id()) @@ -42,8 +59,14 @@ return err } - // Potentially delete the state managed by this config - if d.Get("remove_instance_state_on_destroy").(bool) { + if d.Get("remove_instance_on_destroy").(bool) { + err = transport_tpg.PollingWaitTime(resourceComputePerInstanceConfigInstancePollRead(d, meta, d.Get("name").(string)), PollCheckInstanceConfigInstanceDeleted, "Deleting PerInstanceConfig", d.Timeout(schema.TimeoutDelete), 1) + if err != nil { + return fmt.Errorf("Error waiting for instance delete on PerInstanceConfig %q: %s", d.Id(), err) + } + } else if d.Get("remove_instance_state_on_destroy").(bool) { + // Potentially delete the state managed by this config + // Instance name in applyUpdatesToInstances request must include zone instanceName, err := tpgresource.ReplaceVars(d, config, "zones/{{zone}}/instances/{{name}}") if err != nil { @@ -85,7 +108,7 @@ err = transport_tpg.PollingWaitTime(resourceComputePerInstanceConfigPollRead(d, meta), PollCheckInstanceConfigDeleted, "Deleting PerInstanceConfig", d.Timeout(schema.TimeoutDelete), 1) if err != nil { return fmt.Errorf("Error waiting for delete on PerInstanceConfig %q: %s", d.Id(), err) - } + } } log.Printf("[DEBUG] Finished deleting PerInstanceConfig %q: %#v", d.Id(), res) diff --git a/mmv1/templates/terraform/custom_delete/region_per_instance_config.go.erb b/mmv1/templates/terraform/custom_delete/region_per_instance_config.go.erb index 72caafa23452..4df61c37649d 100644 --- a/mmv1/templates/terraform/custom_delete/region_per_instance_config.go.erb +++ b/mmv1/templates/terraform/custom_delete/region_per_instance_config.go.erb @@ -10,14 +10,31 @@ transport_tpg.MutexStore.Lock(lockName) defer transport_tpg.MutexStore.Unlock(lockName) - url, err := tpgresource.ReplaceVars(d, config, 
"{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/deletePerInstanceConfigs") + var url string + if d.Get("remove_instance_on_destroy").(bool) { + url, err = tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/deleteInstances") + } else { + url, err = tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/deletePerInstanceConfigs") + } if err != nil { return err } var obj map[string]interface{} - obj = map[string]interface{}{ - "names": [1]string{d.Get("name").(string)}, + if d.Get("remove_instance_on_destroy").(bool) { + // Instance name in deleteInstances request must include zone + instanceName, err := findInstanceName(d, config) + if err != nil { + return err + } + + obj = map[string]interface{}{ + "instances": [1]string{instanceName}, + } + } else { + obj = map[string]interface{}{ + "names": [1]string{d.Get("name").(string)}, + } } log.Printf("[DEBUG] Deleting RegionPerInstanceConfig %q", d.Id()) @@ -42,8 +59,13 @@ return err } - // Potentially delete the state managed by this config - if d.Get("remove_instance_state_on_destroy").(bool) { + if d.Get("remove_instance_on_destroy").(bool) { + err = transport_tpg.PollingWaitTime(resourceComputeRegionPerInstanceConfigInstancePollRead(d, meta, d.Get("name").(string)), PollCheckInstanceConfigInstanceDeleted, "Deleting RegionPerInstanceConfig", d.Timeout(schema.TimeoutDelete), 1) + if err != nil { + return fmt.Errorf("Error waiting for instance delete on RegionPerInstanceConfig %q: %s", d.Id(), err) + } + } else if d.Get("remove_instance_state_on_destroy").(bool) { + // Potentially delete the state managed by this config // Instance name in applyUpdatesToInstances request must include zone instanceName, err := findInstanceName(d, config) if err != nil { @@ -86,7 
+108,7 @@ err = transport_tpg.PollingWaitTime(resourceComputeRegionPerInstanceConfigPollRead(d, meta), PollCheckInstanceConfigDeleted, "Deleting RegionPerInstanceConfig", d.Timeout(schema.TimeoutDelete), 1) if err != nil { return fmt.Errorf("Error waiting for delete on RegionPerInstanceConfig %q: %s", d.Id(), err) - } + } } log.Printf("[DEBUG] Finished deleting RegionPerInstanceConfig %q: %#v", d.Id(), res) diff --git a/mmv1/templates/terraform/pre_delete/compute_per_instance_config.go.erb b/mmv1/templates/terraform/pre_delete/compute_per_instance_config.go.erb deleted file mode 100644 index 0fcf58a17620..000000000000 --- a/mmv1/templates/terraform/pre_delete/compute_per_instance_config.go.erb +++ /dev/null @@ -1,3 +0,0 @@ -obj = map[string]interface{}{ - "names": [1]string{d.Get("name").(string)}, -} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/detach_disk.erb b/mmv1/templates/terraform/pre_delete/detach_disk.erb index 55c6eafa4905..cda955083ba3 100644 --- a/mmv1/templates/terraform/pre_delete/detach_disk.erb +++ b/mmv1/templates/terraform/pre_delete/detach_disk.erb @@ -56,7 +56,8 @@ if v, ok := readRes["users"].([]interface{}); ok { err = ComputeOperationWaitTime(config, op, call.project, fmt.Sprintf("Detaching disk from %s/%s/%s", call.project, call.zone, call.instance), userAgent, d.Timeout(schema.TimeoutDelete)) if err != nil { - if opErr, ok := err.(ComputeOperationError); ok && len(opErr.Errors) == 1 && opErr.Errors[0].Code == "RESOURCE_NOT_FOUND" { + var opErr ComputeOperationError + if errors.As(err, &opErr) && len(opErr.Errors) == 1 && opErr.Errors[0].Code == "RESOURCE_NOT_FOUND" { log.Printf("[WARN] instance %q was deleted while awaiting detach", call.instance) continue } diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_per_instance_config_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_per_instance_config_test.go.erb index 0b947748e85d..a7a50f783525 100644 --- 
a/mmv1/third_party/terraform/services/compute/resource_compute_per_instance_config_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_per_instance_config_test.go.erb @@ -170,6 +170,67 @@ func TestAccComputePerInstanceConfig_statefulIps(t *testing.T) { }) } +func TestAccComputePerInstanceConfig_removeInstanceOnDestroy(t *testing.T) { + t.Parallel() + + igmName := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "igm_name": igmName, + "config_name": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "config_name2": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "network": fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)), + "subnetwork": fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)), + "address1": fmt.Sprintf("tf-test-igm-address%s", acctest.RandString(t, 10)), + "address2": fmt.Sprintf("tf-test-igm-address%s", acctest.RandString(t, 10)), + } + igmId := fmt.Sprintf("projects/%s/zones/%s/instanceGroupManagers/%s", + envvar.GetTestProjectFromEnv(), envvar.GetTestZoneFromEnv(), igmName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputePerInstanceConfig_removeInstanceOnDestroyBefore(context), + }, + { + ResourceName: "google_compute_per_instance_config.config_one", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_on_destroy", "zone"}, + }, + { + ResourceName: "google_compute_per_instance_config.config_two", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_on_destroy", "zone"}, + }, + { + Config: testAccComputePerInstanceConfig_removeInstanceOnDestroyAfter(context), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckComputePerInstanceConfigDestroyed(t, igmId, context["config_name"].(string)), + testAccCheckComputePerInstanceConfigInstanceDestroyed(t, igmId, context["config_name"].(string)), + ), + }, + { + ResourceName: "google_compute_per_instance_config.config_two", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_on_destroy", "zone"}, + }, + { + // delete all configs + Config: testAccComputePerInstanceConfig_igm(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputePerInstanceConfigDestroyed(t, igmId, context["config_name2"].(string)), + testAccCheckComputePerInstanceConfigInstanceDestroyed(t, igmId, context["config_name2"].(string)), + ), + }, + }, + }) +} + func testAccComputePerInstanceConfig_statefulBasic(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_compute_per_instance_config" "default" { @@ -340,6 +401,109 @@ resource "google_compute_instance_group_manager" "igm" { `, context) } +func testAccComputePerInstanceConfig_removeInstanceOnDestroyBefore(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + name = "%{network}" +} + +resource "google_compute_subnetwork" "default" { + name = "%{subnetwork}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.id +} + +resource "google_compute_address" "static_internal_ip" { + name = "%{address1}" + address_type = "INTERNAL" +} + +resource "google_compute_address" "static_external_ip" { + name = "%{address2}" + address_type = "EXTERNAL" +} + +resource "google_compute_per_instance_config" "config_one" { + instance_group_manager = google_compute_instance_group_manager.igm.name + name = "%{config_name}" + remove_instance_on_destroy = true + preserved_state { + metadata = { + asdf = "config-one" + } + disk { + device_name = "my-stateful-disk1" + source = google_compute_disk.disk.id + } + + disk { + 
device_name = "my-stateful-disk2" + source = google_compute_disk.disk1.id + } + internal_ip { + ip_address { + address = google_compute_address.static_internal_ip.self_link + } + auto_delete = "NEVER" + interface_name = "nic0" + } + external_ip { + ip_address { + address = google_compute_address.static_external_ip.self_link + } + auto_delete = "NEVER" + interface_name = "nic0" + } + } +} + +resource "google_compute_disk" "disk" { + name = "test-disk-%{random_suffix}" + type = "pd-ssd" + zone = google_compute_instance_group_manager.igm.zone + image = "debian-8-jessie-v20170523" + physical_block_size_bytes = 4096 +} + +resource "google_compute_disk" "disk1" { + name = "test-disk2-%{random_suffix}" + type = "pd-ssd" + zone = google_compute_instance_group_manager.igm.zone + image = "debian-cloud/debian-11" + physical_block_size_bytes = 4096 +} + +resource "google_compute_per_instance_config" "config_two" { + zone = google_compute_instance_group_manager.igm.zone + instance_group_manager = google_compute_instance_group_manager.igm.name + name = "%{config_name2}" + remove_instance_on_destroy = true + preserved_state { + metadata = { + asdf = "config-two" + } + } +} +`, context) + testAccComputePerInstanceConfig_igm(context) +} + +func testAccComputePerInstanceConfig_removeInstanceOnDestroyAfter(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_per_instance_config" "config_two" { + zone = google_compute_instance_group_manager.igm.zone + instance_group_manager = google_compute_instance_group_manager.igm.name + name = "%{config_name2}" + remove_instance_on_destroy = true + preserved_state { + metadata = { + asdf = "config-two" + } + } +} +`, context) + testAccComputePerInstanceConfig_igm(context) +} + func testAccComputePerInstanceConfig_statefulIpsBasic(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_compute_network" "default" { @@ -352,17 +516,17 @@ resource "google_compute_subnetwork" "default" 
{ region = "us-central1" network = google_compute_network.default.id } - + resource "google_compute_address" "static_internal_ip" { name = "%{address1}" address_type = "INTERNAL" } - + resource "google_compute_address" "static_external_ip" { name = "%{address2}" address_type = "EXTERNAL" } - + resource "google_compute_per_instance_config" "default" { instance_group_manager = google_compute_instance_group_manager.igm.name name = "%{config_name}" @@ -404,7 +568,7 @@ resource "google_compute_disk" "disk" { image = "debian-8-jessie-v20170523" physical_block_size_bytes = 4096 } - + resource "google_compute_disk" "disk1" { name = "test-disk2-%{random_suffix}" type = "pd-ssd" @@ -427,17 +591,17 @@ resource "google_compute_subnetwork" "default" { region = "us-central1" network = google_compute_network.default.id } - + resource "google_compute_address" "static_internal_ip" { name = "%{address1}" address_type = "INTERNAL" } - + resource "google_compute_address" "static_external_ip" { name = "%{address2}" address_type = "EXTERNAL" } - + resource "google_compute_per_instance_config" "default" { instance_group_manager = google_compute_instance_group_manager.igm.name name = "%{config_name}" @@ -479,7 +643,7 @@ resource "google_compute_disk" "disk" { image = "debian-8-jessie-v20170523" physical_block_size_bytes = 4096 } - + resource "google_compute_disk" "disk1" { name = "test-disk2-%{random_suffix}" type = "pd-ssd" @@ -505,6 +669,48 @@ func testAccCheckComputePerInstanceConfigDestroyed(t *testing.T, igmId, configNa } } +// Checks that the instance with the given name was destroyed. 
+func testAccCheckComputePerInstanceConfigInstanceDestroyed(t *testing.T, igmId, configName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + foundNames, err := testAccComputePerInstanceConfigListInstances(t, igmId) + if err != nil { + return fmt.Errorf("unable to confirm instance with name %s was destroyed: %v", configName, err) + } + if _, ok := foundNames[configName]; ok { + return fmt.Errorf("instance with name %s still exists", configName) + } + + return nil + } +} + +func testAccComputePerInstanceConfigListInstances(t *testing.T, igmId string) (map[string]struct{}, error) { + config := acctest.GoogleProviderConfig(t) + + url := fmt.Sprintf("%s%s/listManagedInstances", config.ComputeBasePath, igmId) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + RawURL: url, + UserAgent: config.UserAgent, + }) + if err != nil { + return nil, err + } + + v, ok := res["managedInstances"] + if !ok || v == nil { + return nil, nil + } + items := v.([]interface{}) + instances := make(map[string]struct{}) + for _, item := range items { + instance := item.(map[string]interface{}) + instances[fmt.Sprintf("%v", instance["name"])] = struct{}{} + } + return instances, nil +} + func testAccComputePerInstanceConfigListNames(t *testing.T, igmId string) (map[string]struct{}, error) { config := acctest.GoogleProviderConfig(t) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_per_instance_config_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_per_instance_config_test.go.erb index e12d5291e6a7..c2191d46be8c 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_per_instance_config_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_per_instance_config_test.go.erb @@ -169,6 +169,67 @@ func TestAccComputeRegionPerInstanceConfig_statefulIps(t *testing.T) { }) } +func 
TestAccComputeRegionPerInstanceConfig_removeInstanceOnDestroy(t *testing.T) { + t.Parallel() + + rigmName := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "rigm_name": rigmName, + "config_name": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "config_name2": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "network": fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)), + "subnetwork": fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)), + "address1": fmt.Sprintf("tf-test-rigm-address%s", acctest.RandString(t, 10)), + "address2": fmt.Sprintf("tf-test-rigm-address%s", acctest.RandString(t, 10)), + } + rigmId := fmt.Sprintf("projects/%s/regions/%s/instanceGroupManagers/%s", + envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), rigmName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionPerInstanceConfig_removeInstanceOnDestroyBefore(context), + }, + { + ResourceName: "google_compute_region_per_instance_config.config_one", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_on_destroy", "zone"}, + }, + { + ResourceName: "google_compute_region_per_instance_config.config_two", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_on_destroy", "zone"}, + }, + { + Config: testAccComputeRegionPerInstanceConfig_removeInstanceOnDestroyAfter(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionPerInstanceConfigDestroyed(t, rigmId, context["config_name"].(string)), + testAccCheckComputeRegionPerInstanceConfigInstanceDestroyed(t, rigmId, context["config_name"].(string)), + ), + }, + { + ResourceName: "google_compute_region_per_instance_config.config_two", + 
ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_on_destroy", "zone"}, + }, + { + // delete all configs + Config: testAccComputeRegionPerInstanceConfig_rigm(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionPerInstanceConfigDestroyed(t, rigmId, context["config_name2"].(string)), + testAccCheckComputeRegionPerInstanceConfigInstanceDestroyed(t, rigmId, context["config_name2"].(string)), + ), + }, + }, + }) +} + func testAccComputeRegionPerInstanceConfig_statefulBasic(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_compute_region_per_instance_config" "default" { @@ -347,6 +408,111 @@ resource "google_compute_region_instance_group_manager" "rigm" { `, context) } + +func testAccComputeRegionPerInstanceConfig_removeInstanceOnDestroyBefore(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + name = "%{network}" +} + +resource "google_compute_subnetwork" "default" { + name = "%{subnetwork}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.id +} + +resource "google_compute_address" "static_internal_ip" { + name = "%{address1}" + address_type = "INTERNAL" +} + +resource "google_compute_address" "static_external_ip" { + name = "%{address2}" + address_type = "EXTERNAL" +} + +resource "google_compute_region_per_instance_config" "config_one" { + region = google_compute_region_instance_group_manager.rigm.region + region_instance_group_manager = google_compute_region_instance_group_manager.rigm.name + name = "%{config_name}" + remove_instance_on_destroy = true + preserved_state { + metadata = { + asdf = "config-one" + } + disk { + device_name = "my-stateful-disk1" + source = google_compute_disk.disk.id + } + + disk { + device_name = "my-stateful-disk2" + source = google_compute_disk.disk1.id + } + internal_ip { + ip_address { + address = 
google_compute_address.static_internal_ip.self_link + } + auto_delete = "NEVER" + interface_name = "nic0" + } + external_ip { + ip_address { + address = google_compute_address.static_external_ip.self_link + } + auto_delete = "NEVER" + interface_name = "nic0" + } + } +} + +resource "google_compute_disk" "disk" { + name = "test-disk-%{random_suffix}" + type = "pd-ssd" + zone = "us-central1-c" + image = "debian-8-jessie-v20170523" + physical_block_size_bytes = 4096 +} + +resource "google_compute_disk" "disk1" { + name = "test-disk2-%{random_suffix}" + type = "pd-ssd" + zone = "us-central1-c" + image = "debian-cloud/debian-11" + physical_block_size_bytes = 4096 +} + +resource "google_compute_region_per_instance_config" "config_two" { + region = google_compute_region_instance_group_manager.rigm.region + region_instance_group_manager = google_compute_region_instance_group_manager.rigm.name + name = "%{config_name2}" + remove_instance_on_destroy = true + preserved_state { + metadata = { + asdf = "config-two" + } + } +} +`, context) + testAccComputeRegionPerInstanceConfig_rigm(context) +} + +func testAccComputeRegionPerInstanceConfig_removeInstanceOnDestroyAfter(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_per_instance_config" "config_two" { + region = google_compute_region_instance_group_manager.rigm.region + region_instance_group_manager = google_compute_region_instance_group_manager.rigm.name + name = "%{config_name2}" + remove_instance_on_destroy = true + preserved_state { + metadata = { + asdf = "config-two" + } + } +} +`, context) + testAccComputeRegionPerInstanceConfig_rigm(context) +} + func testAccComputeRegionPerInstanceConfig_statefulIpsBasic(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_compute_network" "default" { @@ -513,3 +679,18 @@ func testAccCheckComputeRegionPerInstanceConfigDestroyed(t *testing.T, rigmId, c return nil } } + +// Checks that the instance with the 
given name was destroyed. +func testAccCheckComputeRegionPerInstanceConfigInstanceDestroyed(t *testing.T, rigmId, configName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + foundNames, err := testAccComputePerInstanceConfigListInstances(t, rigmId) + if err != nil { + return fmt.Errorf("unable to confirm instance with name %s was destroyed: %v", configName, err) + } + if _, ok := foundNames[configName]; ok { + return fmt.Errorf("instance with name %s still exists", configName) + } + + return nil + } +} diff --git a/mmv1/third_party/terraform/services/compute/stateful_mig_polling.go b/mmv1/third_party/terraform/services/compute/stateful_mig_polling.go index 57f96ff9fd62..fa22c2ec1d00 100644 --- a/mmv1/third_party/terraform/services/compute/stateful_mig_polling.go +++ b/mmv1/third_party/terraform/services/compute/stateful_mig_polling.go @@ -47,6 +47,58 @@ func resourceComputePerInstanceConfigPollRead(d *schema.ResourceData, meta inter } } +// Deleting a PerInstanceConfig & the underlying instance needs both regular operation polling AND custom polling for deletion which is why this is not generated +func resourceComputePerInstanceConfigInstancePollRead(d *schema.ResourceData, meta interface{}, instanceName string) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/listManagedInstances") + if err != nil { + return nil, err + } + + url, err = transport_tpg.AddQueryParams(url, map[string]string{"filter": fmt.Sprintf("name=%q", instanceName)}) + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return res, err + } + + value, ok := res["managedInstances"] + if !ok || value == nil { + return nil, nil + } + + managedInstances, ok := value.([]interface{}) + if !ok { + return nil, fmt.Errorf("expected list for value managedInstances. Actual value: %v", value) + } + + if len(managedInstances) == 1 { + return managedInstances[0].(map[string]interface{}), nil + } + + return nil, nil + } +} + // RegionPerInstanceConfig needs both regular operation polling AND custom polling for deletion which is why this is not generated func resourceComputeRegionPerInstanceConfigPollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { return func() (map[string]interface{}, error) { @@ -85,6 +137,58 @@ func resourceComputeRegionPerInstanceConfigPollRead(d *schema.ResourceData, meta } } +// Deleting a RegionPerInstanceConfig & the underlying instance needs both regular operation polling AND custom polling for deletion which is why this is not generated +func resourceComputeRegionPerInstanceConfigInstancePollRead(d *schema.ResourceData, meta interface{}, instanceName string) transport_tpg.PollReadFunc { + return func() (map[string]interface{}, error) { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{ComputeBasePath}}projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/listManagedInstances") + if err != nil { + return nil, err + } + + url, err = transport_tpg.AddQueryParams(url, map[string]string{"filter": fmt.Sprintf("name=%q", instanceName)}) + if err != nil { + return nil, err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + 
res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return res, err + } + + value, ok := res["managedInstances"] + if !ok || value == nil { + return nil, nil + } + + managedInstances, ok := value.([]interface{}) + if !ok { + return nil, fmt.Errorf("expected list for value managedInstances. Actual value: %v", value) + } + + if len(managedInstances) == 1 { + return managedInstances[0].(map[string]interface{}), nil + } + + return nil, nil + } +} + // Returns an instance name in the form zones/{zone}/instances/{instance} for the managed // instance matching the name of a PerInstanceConfig func findInstanceName(d *schema.ResourceData, config *transport_tpg.Config) (string, error) { @@ -167,3 +271,22 @@ func PollCheckInstanceConfigDeleted(resp map[string]interface{}, respErr error) } return transport_tpg.ErrorPollResult(fmt.Errorf("Expected PerInstanceConfig to be deleting but status is: %s", status)) } + +func PollCheckInstanceConfigInstanceDeleted(resp map[string]interface{}, respErr error) transport_tpg.PollResult { + if respErr != nil { + return transport_tpg.ErrorPollResult(respErr) + } + + // Nested object 404 appears as nil response + if resp == nil { + // Instance no longer exists + return transport_tpg.SuccessPollResult() + } + + // Read status + status := resp["currentAction"].(string) + if status == "DELETING" { + return transport_tpg.PendingStatusPollResult("Still deleting") + } + return transport_tpg.ErrorPollResult(fmt.Errorf("Expected PerInstanceConfig instance to be deleting but status is: %s", status)) +} diff --git a/mmv1/third_party/terraform/tpgresource/common_operation.go b/mmv1/third_party/terraform/tpgresource/common_operation.go index 10b4de877879..cfe6880fde86 100644 --- a/mmv1/third_party/terraform/tpgresource/common_operation.go +++ b/mmv1/third_party/terraform/tpgresource/common_operation.go @@ -151,7 
+151,7 @@ func OperationWait(w Waiter, activity string, timeout time.Duration, pollInterva } opRaw, err := c.WaitForState() if err != nil { - return fmt.Errorf("Error waiting for %s: %s", activity, err) + return fmt.Errorf("Error waiting for %s: %w", activity, err) } err = w.SetOp(opRaw) From d026cd2d4c705b31429b353b8ef1df25ee18048a Mon Sep 17 00:00:00 2001 From: Eric Pang Date: Thu, 7 Dec 2023 15:26:02 -0500 Subject: [PATCH 29/44] Add host_config, state_note, kms_key, private_config fields to Secure Source Manager Instance resource (#9583) --- .../securesourcemanager/Instance.yaml | 85 +++++++++++++++++++ ...secure_source_manager_instance_cmek.tf.erb | 26 ++++++ ...ure_source_manager_instance_private.tf.erb | 76 +++++++++++++++++ 3 files changed, 187 insertions(+) create mode 100644 mmv1/templates/terraform/examples/secure_source_manager_instance_cmek.tf.erb create mode 100644 mmv1/templates/terraform/examples/secure_source_manager_instance_private.tf.erb diff --git a/mmv1/products/securesourcemanager/Instance.yaml b/mmv1/products/securesourcemanager/Instance.yaml index a058f6a75064..f619fee4d56b 100644 --- a/mmv1/products/securesourcemanager/Instance.yaml +++ b/mmv1/products/securesourcemanager/Instance.yaml @@ -63,6 +63,27 @@ examples: ])" vars: instance_id: 'my-instance' + - !ruby/object:Provider::Terraform::Examples + name: 'secure_source_manager_instance_cmek' + primary_resource_id: 'default' + primary_resource_name: "fmt.Sprintf(\"tf-test-my-instance%s\", + context[\"random_suffix\"\ + ])" + vars: + instance_id: 'my-instance' + keyring_name: 'my-keyring' + key_name: 'my-key' + - !ruby/object:Provider::Terraform::Examples + name: 'secure_source_manager_instance_private' + pull_external: true + primary_resource_id: 'default' + primary_resource_name: "fmt.Sprintf(\"tf-test-my-instance%s\", + context[\"random_suffix\"\ + ])" + vars: + instance_id: 'my-instance' + ca_pool_id: 'ca-pool' + root_ca_id: 'root-ca' parameters: - !ruby/object:Api::Type::String name: 
'location' @@ -110,3 +131,67 @@ properties: - :DELETING - :PAUSED - :UNKNOWN + - !ruby/object:Api::Type::NestedObject + name: 'hostConfig' + description: | + A list of hostnames for this instance. + output: true + properties: + - !ruby/object:Api::Type::String + name: 'html' + description: 'HTML hostname.' + output: true + - !ruby/object:Api::Type::String + name: 'api' + description: 'API hostname.' + output: true + - !ruby/object:Api::Type::String + name: 'gitHttp' + description: 'Git HTTP hostname.' + output: true + - !ruby/object:Api::Type::String + name: 'gitSsh' + description: 'Git SSH hostname.' + output: true + - !ruby/object:Api::Type::Enum + name: 'stateNote' + description: | + Provides information about the current instance state. + output: true + values: + - :STATE_NOTE_UNSPECIFIED + - :PAUSED_CMEK_UNAVAILABLE + - :INSTANCE_RESUMING + - !ruby/object:Api::Type::String + name: 'kmsKey' + description: | + Customer-managed encryption key name, in the format projects/*/locations/*/keyRings/*/cryptoKeys/*. + immutable: true + - !ruby/object:Api::Type::NestedObject + name: 'privateConfig' + description: | + Private settings for private instance. + immutable: true + properties: + - !ruby/object:Api::Type::Boolean + name: 'isPrivate' + description: | + 'Indicate if it's private instance.' + required: true + immutable: true + - !ruby/object:Api::Type::String + name: 'caPool' + description: | + CA pool resource, resource must in the format of `projects/{project}/locations/{location}/caPools/{ca_pool}`. + required: true + immutable: true + - !ruby/object:Api::Type::String + name: 'httpServiceAttachment' + description: | + Service Attachment for HTTP, resource is in the format of `projects/{project}/regions/{region}/serviceAttachments/{service_attachment}`. 
+ output: true + - !ruby/object:Api::Type::String + name: 'sshServiceAttachment' + description: | + Service Attachment for SSH, resource is in the format of `projects/{project}/regions/{region}/serviceAttachments/{service_attachment}`. + output: true diff --git a/mmv1/templates/terraform/examples/secure_source_manager_instance_cmek.tf.erb b/mmv1/templates/terraform/examples/secure_source_manager_instance_cmek.tf.erb new file mode 100644 index 000000000000..abde1100aaa7 --- /dev/null +++ b/mmv1/templates/terraform/examples/secure_source_manager_instance_cmek.tf.erb @@ -0,0 +1,26 @@ +resource "google_kms_key_ring" "key_ring" { + name = "<%= ctx[:vars]['keyring_name'] %>" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "<%= ctx[:vars]['key_name'] %>" + key_ring = google_kms_key_ring.key_ring.id +} + +resource "google_kms_crypto_key_iam_binding" "crypto_key_binding" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + + members = [ + "serviceAccount:service-${data.google_project.project.number}@gcp-sa-sourcemanager.iam.gserviceaccount.com" + ] +} + +resource "google_secure_source_manager_instance" "<%= ctx[:primary_resource_id] %>" { + location = "us-central1" + instance_id = "<%= ctx[:vars]['instance_id'] %>" + kms_key = google_kms_crypto_key.crypto_key.id +} + +data "google_project" "project" {} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/secure_source_manager_instance_private.tf.erb b/mmv1/templates/terraform/examples/secure_source_manager_instance_private.tf.erb new file mode 100644 index 000000000000..03a334e9bf97 --- /dev/null +++ b/mmv1/templates/terraform/examples/secure_source_manager_instance_private.tf.erb @@ -0,0 +1,76 @@ +resource "google_privateca_ca_pool" "ca_pool" { + name = "<%= ctx[:vars]['ca_pool_id'] %>" + location = "us-central1" + tier = "ENTERPRISE" + publishing_options { + publish_ca_cert = true + publish_crl = true 
+ } +} + +resource "google_privateca_certificate_authority" "root_ca" { + pool = google_privateca_ca_pool.ca_pool.name + certificate_authority_id = "<%= ctx[:vars]['root_ca_id'] %>" + location = "us-central1" + config { + subject_config { + subject { + organization = "google" + common_name = "my-certificate-authority" + } + } + x509_config { + ca_options { + is_ca = true + } + key_usage { + base_key_usage { + cert_sign = true + crl_sign = true + } + extended_key_usage { + server_auth = true + } + } + } + } + key_spec { + algorithm = "RSA_PKCS1_4096_SHA256" + } + + // Disable deletion protections for easier test cleanup purposes + deletion_protection = false + ignore_active_certificates_on_deletion = true + skip_grace_period = true +} + +resource "google_privateca_ca_pool_iam_binding" "ca_pool_binding" { + ca_pool = google_privateca_ca_pool.ca_pool.id + role = "roles/privateca.certificateRequester" + + members = [ + "serviceAccount:service-${data.google_project.project.number}@gcp-sa-sourcemanager.iam.gserviceaccount.com" + ] +} + +resource "google_secure_source_manager_instance" "<%= ctx[:primary_resource_id] %>" { + instance_id = "<%= ctx[:vars]['instance_id'] %>" + location = "us-central1" + private_config { + is_private = true + ca_pool = google_privateca_ca_pool.ca_pool.id + } + depends_on = [ + google_privateca_certificate_authority.root_ca, + time_sleep.wait_60_seconds + ] +} + +# ca pool IAM permissions can take time to propagate +resource "time_sleep" "wait_60_seconds" { + depends_on = [google_privateca_ca_pool_iam_binding.ca_pool_binding] + + create_duration = "60s" +} + +data "google_project" "project" {} \ No newline at end of file From 65322ce7395a0a1e39dcfe357dae47c90b954c5a Mon Sep 17 00:00:00 2001 From: Esteban Bouza Date: Fri, 8 Dec 2023 05:26:14 +0800 Subject: [PATCH 30/44] Add data source for google_compute_region_disk (#9421) --- .../provider/provider_mmv1_resources.go.erb | 5 +- .../data_source_google_compute_region_disk.go | 46 
+++++++++++++++++ ..._source_google_compute_region_disk_test.go | 49 ++++++++++++++++++ .../docs/d/compute_region_disk.html.markdown | 50 +++++++++++++++++++ 4 files changed, 148 insertions(+), 2 deletions(-) create mode 100644 mmv1/third_party/terraform/services/compute/data_source_google_compute_region_disk.go create mode 100644 mmv1/third_party/terraform/services/compute/data_source_google_compute_region_disk_test.go create mode 100644 mmv1/third_party/terraform/website/docs/d/compute_region_disk.html.markdown diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index 38e02676b2ec..a43b78ba436c 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -73,7 +73,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_compute_backend_service": compute.DataSourceGoogleComputeBackendService(), "google_compute_backend_bucket": compute.DataSourceGoogleComputeBackendBucket(), "google_compute_default_service_account": compute.DataSourceGoogleComputeDefaultServiceAccount(), - "google_compute_disk": compute.DataSourceGoogleComputeDisk(), + "google_compute_disk": compute.DataSourceGoogleComputeDisk(), "google_compute_forwarding_rule": compute.DataSourceGoogleComputeForwardingRule(), "google_compute_global_address": compute.DataSourceGoogleComputeGlobalAddress(), "google_compute_global_forwarding_rule": compute.DataSourceGoogleComputeGlobalForwardingRule(), @@ -92,9 +92,10 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_compute_network_peering": compute.DataSourceComputeNetworkPeering(), "google_compute_node_types": compute.DataSourceGoogleComputeNodeTypes(), "google_compute_regions": compute.DataSourceGoogleComputeRegions(), - "google_compute_region_network_endpoint_group": compute.DataSourceGoogleComputeRegionNetworkEndpointGroup(), + 
"google_compute_region_disk": compute.DataSourceGoogleComputeRegionDisk(), "google_compute_region_instance_group": compute.DataSourceGoogleComputeRegionInstanceGroup(), "google_compute_region_instance_template": compute.DataSourceGoogleComputeRegionInstanceTemplate(), + "google_compute_region_network_endpoint_group": compute.DataSourceGoogleComputeRegionNetworkEndpointGroup(), "google_compute_region_ssl_certificate": compute.DataSourceGoogleRegionComputeSslCertificate(), "google_compute_resource_policy": compute.DataSourceGoogleComputeResourcePolicy(), "google_compute_router": compute.DataSourceGoogleComputeRouter(), diff --git a/mmv1/third_party/terraform/services/compute/data_source_google_compute_region_disk.go b/mmv1/third_party/terraform/services/compute/data_source_google_compute_region_disk.go new file mode 100644 index 000000000000..010c96f65f72 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/data_source_google_compute_region_disk.go @@ -0,0 +1,46 @@ +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComputeRegionDisk() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeRegionDisk().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "region") + + return &schema.Resource{ + Read: dataSourceGoogleComputeRegionDiskRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComputeRegionDiskRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/disks/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: 
%s", err) + } + d.SetId(id) + err = resourceComputeRegionDiskRead(d, meta) + if err != nil { + return err + } + + if err := tpgresource.SetDataSourceLabels(d); err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + + return nil +} diff --git a/mmv1/third_party/terraform/services/compute/data_source_google_compute_region_disk_test.go b/mmv1/third_party/terraform/services/compute/data_source_google_compute_region_disk_test.go new file mode 100644 index 000000000000..d28025ef857b --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/data_source_google_compute_region_disk_test.go @@ -0,0 +1,49 @@ +package compute_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceGoogleComputeRegionDisk_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeDiskDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleComputeRegionDisk_basic(context), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState("data.google_compute_region_disk.foo", "google_compute_region_disk.foo"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleComputeRegionDisk_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_disk" "foo" { + name = "tf-test-compute-disk-%{random_suffix}" + type = "pd-standard" + replica_zones = ["us-central1-a", "us-central1-f"] + + labels = { + my-label = "my-label-value" + } +} + +data "google_compute_region_disk" "foo" { + name = google_compute_region_disk.foo.name + project = 
google_compute_region_disk.foo.project +} +`, context) +} diff --git a/mmv1/third_party/terraform/website/docs/d/compute_region_disk.html.markdown b/mmv1/third_party/terraform/website/docs/d/compute_region_disk.html.markdown new file mode 100644 index 000000000000..ca3b5443ee4c --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/compute_region_disk.html.markdown @@ -0,0 +1,50 @@ +--- +subcategory: "Compute Engine" +description: |- + Get information about a Google Compute Regional Persistent disks. +--- + +# google\_compute\_region\_disk + +Get information about a Google Compute Regional Persistent disks. + +[the official documentation](https://cloud.google.com/compute/docs/disks) and its [API](https://cloud.google.com/compute/docs/reference/rest/v1/regionDisks). + +## Example Usage + +```hcl +data "google_compute_region_disk" "disk" { + name = "persistent-regional-disk" + project = "example" + region = "us-central1" + type = "pd-ssd" + physical_block_size_bytes = 4096 + + replica_zones = ["us-central1-a", "us-central1-f"] +} + +resource "google_compute_instance" "default" { + # ... + + attached_disk { + source = data.google_compute_disk.disk.self_link + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of a specific disk. + +- - - + +* `region` - (Optional) A reference to the region where the disk resides. + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + +## Attributes Reference + +See [google_compute_region_disk](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_region_disk) resource for details of the available attributes. 
\ No newline at end of file From a007c7ce0f9498648bb61de3e9151b0639ed181b Mon Sep 17 00:00:00 2001 From: Ilia Lazebnik Date: Thu, 7 Dec 2023 21:35:15 +0000 Subject: [PATCH 31/44] dataform_repository - add missing args (#9173) --- mmv1/products/dataform/Repository.yaml | 11 +++++++++++ .../terraform/examples/dataform_repository.tf.erb | 6 ++++++ 2 files changed, 17 insertions(+) diff --git a/mmv1/products/dataform/Repository.yaml b/mmv1/products/dataform/Repository.yaml index 6c7b206f65a6..1d99d98c8fb7 100644 --- a/mmv1/products/dataform/Repository.yaml +++ b/mmv1/products/dataform/Repository.yaml @@ -123,3 +123,14 @@ properties: - !ruby/object:Api::Type::String name: 'serviceAccount' description: The service account to run workflow invocations under. + - !ruby/object:Api::Type::String + name: npmrcEnvironmentVariablesSecretVersion + description: Optional. The name of the Secret Manager secret version to be used to interpolate variables into the .npmrc file for package installation operations. Must be in the format projects/*/secrets/*/versions/*. The file itself must be in a JSON format. + - !ruby/object:Api::Type::String + name: displayName + description: Optional. The repository's user-friendly name. + - !ruby/object:Api::Type::KeyValueLabels + name: labels + description: | + Optional. Repository user labels. + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. 
diff --git a/mmv1/templates/terraform/examples/dataform_repository.tf.erb b/mmv1/templates/terraform/examples/dataform_repository.tf.erb index 3a9e3232aa57..1e76e9133b1e 100644 --- a/mmv1/templates/terraform/examples/dataform_repository.tf.erb +++ b/mmv1/templates/terraform/examples/dataform_repository.tf.erb @@ -22,6 +22,12 @@ resource "google_secret_manager_secret_version" "secret_version" { resource "google_dataform_repository" "<%= ctx[:primary_resource_id] %>" { provider = google-beta name = "<%= ctx[:vars]['dataform_repository_name'] %>" + display_name = "<%= ctx[:vars]['dataform_repository_name'] %>" + npmrc_environment_variables_secret_version = google_secret_manager_secret_version.secret_version.id + + labels = { + label_foo1 = "label-bar1" + } git_remote_settings { url = google_sourcerepo_repository.git_repository.url From c3b3a420aa6290cb448cb232185fb9cff4bc04f7 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Thu, 7 Dec 2023 22:26:13 +0000 Subject: [PATCH 32/44] Add migrationcenter service to enabled APIs (#9603) --- .ci/infra/terraform/main.tf | 1 + 1 file changed, 1 insertion(+) diff --git a/.ci/infra/terraform/main.tf b/.ci/infra/terraform/main.tf index bed01ed1da2a..49ecc649a20d 100644 --- a/.ci/infra/terraform/main.tf +++ b/.ci/infra/terraform/main.tf @@ -261,6 +261,7 @@ module "project-services" { "managedidentities.googleapis.com", "memcache.googleapis.com", "metastore.googleapis.com", + "migrationcenter.googleapis.com", "ml.googleapis.com", "mobilecrashreporting.googleapis.com", "monitoring.googleapis.com", From d4a31b0b29bc5759eb10f1f29a9df1f67cbbd7b3 Mon Sep 17 00:00:00 2001 From: Eric Pang Date: Thu, 7 Dec 2023 18:00:28 -0500 Subject: [PATCH 33/44] Update documentation links in Secure Source Manager Instance (#9585) * Update links * Update mmv1/products/securesourcemanager/Instance.yaml Co-authored-by: Nick Elliot --------- Co-authored-by: Nick Elliot --- 
mmv1/products/securesourcemanager/Instance.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mmv1/products/securesourcemanager/Instance.yaml b/mmv1/products/securesourcemanager/Instance.yaml index f619fee4d56b..963c073e5ad0 100644 --- a/mmv1/products/securesourcemanager/Instance.yaml +++ b/mmv1/products/securesourcemanager/Instance.yaml @@ -20,7 +20,8 @@ immutable: true description: 'Instances are deployed to an available Google Cloud region and are accessible via their web interface.' references: !ruby/object:Api::Resource::ReferenceLinks guides: - 'Official Documentation': 'https://cloud.google.com/secure-source-manager/overview/overview' + 'Official Documentation': 'https://cloud.google.com/secure-source-manager/docs/create-instance' + api: 'https://cloud.google.com/secure-source-manager/docs/reference/rest/v1/projects.locations.instances' import_format: ['projects/{{project}}/locations/{{location}}/instances/{{instance_id}}', '{{instance_id}}'] autogen_async: true async: !ruby/object:Api::OpAsync From 67e40a4feff66c569577d0e25e4b0e49d564fbf2 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Fri, 8 Dec 2023 14:25:22 +0000 Subject: [PATCH 34/44] Ignore `update_time` in import test on `google_vmwareengine_network_policy` resource (#9601) --- .../resource_vmwareengine_private_cloud_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go index 9a189bd1c378..40b0cde0a558 100644 --- a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go +++ b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go @@ -38,7 +38,7 @@ func TestAccVmwareenginePrivateCloud_vmwareEnginePrivateCloudUpdate(t *testing.T ResourceName: 
"google_vmwareengine_private_cloud.vmw-engine-pc", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name"}, + ImportStateVerifyIgnore: []string{"location", "name", "update_time"}, }, { Config: testPrivateCloudUpdateConfig(context, "description2", 4), // Expand PC @@ -47,7 +47,7 @@ func TestAccVmwareenginePrivateCloud_vmwareEnginePrivateCloudUpdate(t *testing.T ResourceName: "google_vmwareengine_private_cloud.vmw-engine-pc", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name"}, + ImportStateVerifyIgnore: []string{"location", "name", "update_time"}, }, { Config: testPrivateCloudUpdateConfig(context, "description2", 3), // Shrink PC @@ -56,7 +56,7 @@ func TestAccVmwareenginePrivateCloud_vmwareEnginePrivateCloudUpdate(t *testing.T ResourceName: "google_vmwareengine_private_cloud.vmw-engine-pc", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name"}, + ImportStateVerifyIgnore: []string{"location", "name", "update_time"}, }, }, }) From 528ec5d02ef2a696e4c2887551eff5cf1bd535fe Mon Sep 17 00:00:00 2001 From: Amir Romashkin Date: Fri, 8 Dec 2023 18:35:02 +0100 Subject: [PATCH 35/44] Extract converter_factory and hcl_write from utils (#9607) --- mmv1/third_party/cai2hcl/common/converter.go | 4 -- .../cai2hcl/common/converter_factory.go | 21 +++++++ mmv1/third_party/cai2hcl/common/hcl_write.go | 55 +++++++++++++++++ mmv1/third_party/cai2hcl/common/utils.go | 60 ------------------- 4 files changed, 76 insertions(+), 64 deletions(-) create mode 100644 mmv1/third_party/cai2hcl/common/converter_factory.go create mode 100644 mmv1/third_party/cai2hcl/common/hcl_write.go diff --git a/mmv1/third_party/cai2hcl/common/converter.go b/mmv1/third_party/cai2hcl/common/converter.go index 17f6a652daac..b83af7b72568 100644 --- a/mmv1/third_party/cai2hcl/common/converter.go +++ b/mmv1/third_party/cai2hcl/common/converter.go @@ -2,7 +2,6 @@ package common 
import ( "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/caiasset" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/zclconf/go-cty/cty" ) @@ -12,9 +11,6 @@ type Converter interface { Convert(asset []*caiasset.Asset) ([]*HCLResourceBlock, error) } -// Function initializing a converter from TF resource name and TF resource schema. -type ConverterFactory = func(name string, schema map[string]*schema.Schema) Converter - // HCLResourceBlock identifies the HCL block's labels and content. type HCLResourceBlock struct { Labels []string diff --git a/mmv1/third_party/cai2hcl/common/converter_factory.go b/mmv1/third_party/cai2hcl/common/converter_factory.go new file mode 100644 index 000000000000..e4bec63d3184 --- /dev/null +++ b/mmv1/third_party/cai2hcl/common/converter_factory.go @@ -0,0 +1,21 @@ +package common + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/provider" +) + +// Function initializing a converter from TF resource name and TF resource schema. +type ConverterFactory = func(name string, schema map[string]*schema.Schema) Converter + +// Initializes map of converters. 
+func CreateConverterMap(converterFactories map[string]ConverterFactory) map[string]Converter { + tpgProvider := tpg.Provider() + + result := make(map[string]Converter, len(converterFactories)) + for name, factory := range converterFactories { + result[name] = factory(name, tpgProvider.ResourcesMap[name].Schema) + } + + return result +} diff --git a/mmv1/third_party/cai2hcl/common/hcl_write.go b/mmv1/third_party/cai2hcl/common/hcl_write.go new file mode 100644 index 000000000000..c14b67401f99 --- /dev/null +++ b/mmv1/third_party/cai2hcl/common/hcl_write.go @@ -0,0 +1,55 @@ +package common + +import ( + "fmt" + + "github.com/hashicorp/hcl/v2/hclwrite" + "github.com/zclconf/go-cty/cty" +) + +func hclWriteBlock(val cty.Value, body *hclwrite.Body) error { + if val.IsNull() { + return nil + } + if !val.Type().IsObjectType() { + return fmt.Errorf("expect object type only, but type = %s", val.Type().FriendlyName()) + } + it := val.ElementIterator() + for it.Next() { + objKey, objVal := it.Element() + if objVal.IsNull() { + continue + } + objValType := objVal.Type() + switch { + case objValType.IsObjectType(): + newBlock := body.AppendNewBlock(objKey.AsString(), nil) + if err := hclWriteBlock(objVal, newBlock.Body()); err != nil { + return err + } + case objValType.IsCollectionType(): + if objVal.LengthInt() == 0 { + continue + } + // Presumes map should not contain object type. 
+ if !objValType.IsMapType() && objValType.ElementType().IsObjectType() { + listIterator := objVal.ElementIterator() + for listIterator.Next() { + _, listVal := listIterator.Element() + subBlock := body.AppendNewBlock(objKey.AsString(), nil) + if err := hclWriteBlock(listVal, subBlock.Body()); err != nil { + return err + } + } + continue + } + fallthrough + default: + if objValType.FriendlyName() == "string" && objVal.AsString() == "" { + continue + } + body.SetAttributeValue(objKey.AsString(), objVal) + } + } + return nil +} diff --git a/mmv1/third_party/cai2hcl/common/utils.go b/mmv1/third_party/cai2hcl/common/utils.go index 0325957d9b3e..cac4204eec2f 100644 --- a/mmv1/third_party/cai2hcl/common/utils.go +++ b/mmv1/third_party/cai2hcl/common/utils.go @@ -10,7 +10,6 @@ import ( "github.com/hashicorp/hcl/hcl/printer" "github.com/hashicorp/hcl/v2/hclwrite" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/provider" "github.com/zclconf/go-cty/cty" ctyjson "github.com/zclconf/go-cty/cty/json" ) @@ -55,18 +54,6 @@ func MapToCtyValWithSchema(m map[string]interface{}, s map[string]*schema.Schema return ret, nil } -// Initializes map of converters. -func CreateConverterMap(converterFactories map[string]ConverterFactory) map[string]Converter { - tpgProvider := tpg.Provider() - - result := make(map[string]Converter, len(converterFactories)) - for name, factory := range converterFactories { - result[name] = factory(name, tpgProvider.ResourcesMap[name].Schema) - } - - return result -} - func Convert(assets []*caiasset.Asset, converterNames map[string]string, converterMap map[string]Converter) ([]byte, error) { // Group resources from the same tf resource type for convert. 
// tf -> cai has 1:N mappings occasionally @@ -105,53 +92,6 @@ func Convert(assets []*caiasset.Asset, converterNames map[string]string, convert return printer.Format(f.Bytes()) } -func hclWriteBlock(val cty.Value, body *hclwrite.Body) error { - if val.IsNull() { - return nil - } - if !val.Type().IsObjectType() { - return fmt.Errorf("expect object type only, but type = %s", val.Type().FriendlyName()) - } - it := val.ElementIterator() - for it.Next() { - objKey, objVal := it.Element() - if objVal.IsNull() { - continue - } - objValType := objVal.Type() - switch { - case objValType.IsObjectType(): - newBlock := body.AppendNewBlock(objKey.AsString(), nil) - if err := hclWriteBlock(objVal, newBlock.Body()); err != nil { - return err - } - case objValType.IsCollectionType(): - if objVal.LengthInt() == 0 { - continue - } - // Presumes map should not contain object type. - if !objValType.IsMapType() && objValType.ElementType().IsObjectType() { - listIterator := objVal.ElementIterator() - for listIterator.Next() { - _, listVal := listIterator.Element() - subBlock := body.AppendNewBlock(objKey.AsString(), nil) - if err := hclWriteBlock(listVal, subBlock.Body()); err != nil { - return err - } - } - continue - } - fallthrough - default: - if objValType.FriendlyName() == "string" && objVal.AsString() == "" { - continue - } - body.SetAttributeValue(objKey.AsString(), objVal) - } - } - return nil -} - func hashicorpCtyTypeToZclconfCtyType(t hashicorpcty.Type) (cty.Type, error) { b, err := json.Marshal(t) if err != nil { From a691e0379079f0fd3f65f054b4d73456f314dbe8 Mon Sep 17 00:00:00 2001 From: Shuya Ma <87669292+shuyama1@users.noreply.github.com> Date: Fri, 8 Dec 2023 11:28:47 -0800 Subject: [PATCH 36/44] =?UTF-8?q?add=20optional=20input=20ParentService=20?= =?UTF-8?q?to=20BootstrapSharedServiceNetworkingC=E2=80=A6=20(#9598)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- mmv1/products/looker/Instance.yaml | 2 +- 
.../terraform/acctest/bootstrap_test_utils.go | 36 +++++++++++++++++-- 2 files changed, 34 insertions(+), 4 deletions(-) diff --git a/mmv1/products/looker/Instance.yaml b/mmv1/products/looker/Instance.yaml index 5af50943cc3b..38b871deb18e 100644 --- a/mmv1/products/looker/Instance.yaml +++ b/mmv1/products/looker/Instance.yaml @@ -71,7 +71,7 @@ examples: test_vars_overrides: address_name: 'acctest.BootstrapSharedTestGlobalAddress(t, "looker-vpc-network-1", acctest.AddressWithPrefixLength(20))' kms_key_name: 'acctest.BootstrapKMSKeyInLocation(t, "us-central1").CryptoKey.Name' - network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "looker-vpc-network-1", acctest.AddressWithPrefixLength(20))' + network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "looker-vpc-network-1", acctest.ServiceNetworkWithPrefixLength(20))' skip_docs: true parameters: - !ruby/object:Api::Type::String diff --git a/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go b/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go index 213db73ecbd3..d0bb61468ad6 100644 --- a/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go +++ b/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go @@ -443,6 +443,35 @@ func BootstrapSharedTestGlobalAddress(t *testing.T, testId string, params ...fun return address.Name } +type ServiceNetworkSettings struct { + PrefixLength int + ParentService string +} + +func ServiceNetworkWithPrefixLength(prefixLength int) func(*ServiceNetworkSettings) { + return func(settings *ServiceNetworkSettings) { + settings.PrefixLength = prefixLength + } +} + +func ServiceNetworkWithParentService(parentService string) func(*ServiceNetworkSettings) { + return func(settings *ServiceNetworkSettings) { + settings.ParentService = parentService + } +} + +func NewServiceNetworkSettings(options ...func(*ServiceNetworkSettings)) *ServiceNetworkSettings { + settings := &ServiceNetworkSettings{ + PrefixLength: 16, // default prefix length + 
ParentService: "servicenetworking.googleapis.com", // default parent service + } + + for _, o := range options { + o(settings) + } + return settings +} + // BootstrapSharedServiceNetworkingConnection will create a shared network // if it hasn't been created in the test project, a global address // if it hasn't been created in the test project, and a service networking connection @@ -461,8 +490,9 @@ func BootstrapSharedTestGlobalAddress(t *testing.T, testId string, params ...fun // https://cloud.google.com/vpc/docs/configure-private-services-access#removing-connection // // testId specifies the test for which a shared network and a gobal address are used/initialized. -func BootstrapSharedServiceNetworkingConnection(t *testing.T, testId string, params ...func(*AddressSettings)) string { - parentService := "services/servicenetworking.googleapis.com" +func BootstrapSharedServiceNetworkingConnection(t *testing.T, testId string, params ...func(*ServiceNetworkSettings)) string { + settings := NewServiceNetworkSettings(params...) + parentService := "services/" + settings.ParentService projectId := envvar.GetTestProjectFromEnv() config := BootstrapConfig(t) @@ -479,7 +509,7 @@ func BootstrapSharedServiceNetworkingConnection(t *testing.T, testId string, par networkName := SharedTestNetworkPrefix + testId networkId := fmt.Sprintf("projects/%v/global/networks/%v", project.ProjectNumber, networkName) - globalAddressName := BootstrapSharedTestGlobalAddress(t, testId, params...) 
+ globalAddressName := BootstrapSharedTestGlobalAddress(t, testId, AddressWithPrefixLength(settings.PrefixLength)) readCall := config.NewServiceNetworkingClient(config.UserAgent).Services.Connections.List(parentService).Network(networkId) if config.UserProjectOverride { From fd83cc81557bcab5680ef473a4f892ff9f39f106 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Fri, 8 Dec 2023 11:46:38 -0800 Subject: [PATCH 37/44] Refactor magician structs (#9605) * Refactored github interfaces Fixed bug in overriding breaking changes * gofmt * Removed GetPullRequestLabelIDs --- .ci/magician/cloudbuild/build_trigger.go | 8 +- .ci/magician/cloudbuild/community.go | 14 ++-- .ci/magician/cloudbuild/init.go | 12 +-- .ci/magician/cmd/community_checker.go | 20 ++--- .ci/magician/cmd/community_checker_test.go | 18 ++++- .ci/magician/cmd/generate_comment.go | 78 +++++++++---------- .ci/magician/cmd/interfaces.go | 34 ++++++++ .ci/magician/cmd/membership_checker.go | 28 ++----- .ci/magician/cmd/membership_checker_test.go | 24 +++++- .ci/magician/cmd/mock_github_test.go | 13 +--- .ci/magician/cmd/test_tgc.go | 2 +- .ci/magician/cmd/test_tpg.go | 2 +- .ci/magician/exec/runner.go | 35 +++------ .ci/magician/github/get.go | 52 +++++-------- .ci/magician/github/init.go | 22 +----- .ci/magician/github/membership.go | 2 +- .ci/magician/github/reviewer_assignment.go | 2 +- .../github/reviewer_assignment_test.go | 4 +- .ci/magician/github/set.go | 12 +-- .ci/magician/main.go | 1 - 20 files changed, 182 insertions(+), 201 deletions(-) create mode 100644 .ci/magician/cmd/interfaces.go diff --git a/.ci/magician/cloudbuild/build_trigger.go b/.ci/magician/cloudbuild/build_trigger.go index 04ecaf80c748..676e5a5b64a6 100644 --- a/.ci/magician/cloudbuild/build_trigger.go +++ b/.ci/magician/cloudbuild/build_trigger.go @@ -5,10 +5,10 @@ import ( "fmt" "os" - "google.golang.org/api/cloudbuild/v1" + cloudbuildv1 "google.golang.org/api/cloudbuild/v1" ) -func (cb cloudBuild) 
TriggerMMPresubmitRuns(commitSha string, substitutions map[string]string) error { +func (cb *Client) TriggerMMPresubmitRuns(commitSha string, substitutions map[string]string) error { presubmitTriggerId, ok := os.LookupEnv("GENERATE_DIFFS_TRIGGER") if !ok { return fmt.Errorf("did not provide GENERATE_DIFFS_TRIGGER environment variable") @@ -24,12 +24,12 @@ func (cb cloudBuild) TriggerMMPresubmitRuns(commitSha string, substitutions map[ func triggerCloudBuildRun(projectId, triggerId, repoName, commitSha string, substitutions map[string]string) error { ctx := context.Background() - c, err := cloudbuild.NewService(ctx) + c, err := cloudbuildv1.NewService(ctx) if err != nil { return fmt.Errorf("failed to create Cloud Build service client: %s", err) } - repoSource := &cloudbuild.RepoSource{ + repoSource := &cloudbuildv1.RepoSource{ ProjectId: projectId, RepoName: repoName, CommitSha: commitSha, diff --git a/.ci/magician/cloudbuild/community.go b/.ci/magician/cloudbuild/community.go index ff7634b82d7d..09b8d2d6345a 100644 --- a/.ci/magician/cloudbuild/community.go +++ b/.ci/magician/cloudbuild/community.go @@ -5,10 +5,10 @@ import ( "fmt" "os" - "google.golang.org/api/cloudbuild/v1" + cloudbuildv1 "google.golang.org/api/cloudbuild/v1" ) -func (cb cloudBuild) ApproveCommunityChecker(prNumber, commitSha string) error { +func (cb *Client) ApproveCommunityChecker(prNumber, commitSha string) error { buildId, err := getPendingBuildId(PROJECT_ID, commitSha) if err != nil { return err @@ -26,7 +26,7 @@ func (cb cloudBuild) ApproveCommunityChecker(prNumber, commitSha string) error { return nil } -func (cb cloudBuild) GetAwaitingApprovalBuildLink(prNumber, commitSha string) (string, error) { +func (cb *Client) GetAwaitingApprovalBuildLink(prNumber, commitSha string) (string, error) { buildId, err := getPendingBuildId(PROJECT_ID, commitSha) if err != nil { return "", err @@ -49,7 +49,7 @@ func getPendingBuildId(projectId, commitSha string) (string, error) { ctx := 
context.Background() - c, err := cloudbuild.NewService(ctx) + c, err := cloudbuildv1.NewService(ctx) if err != nil { return "", err } @@ -76,15 +76,15 @@ func getPendingBuildId(projectId, commitSha string) (string, error) { func approveBuild(projectId, buildId string) error { ctx := context.Background() - c, err := cloudbuild.NewService(ctx) + c, err := cloudbuildv1.NewService(ctx) if err != nil { return err } name := fmt.Sprintf("projects/%s/builds/%s", projectId, buildId) - approveBuildRequest := &cloudbuild.ApproveBuildRequest{ - ApprovalResult: &cloudbuild.ApprovalResult{ + approveBuildRequest := &cloudbuildv1.ApproveBuildRequest{ + ApprovalResult: &cloudbuildv1.ApprovalResult{ Decision: "APPROVED", }, } diff --git a/.ci/magician/cloudbuild/init.go b/.ci/magician/cloudbuild/init.go index e2d29face5a2..52f65318aa0c 100644 --- a/.ci/magician/cloudbuild/init.go +++ b/.ci/magician/cloudbuild/init.go @@ -1,14 +1,8 @@ package cloudbuild -type cloudBuild bool - -type CloudBuild interface { - ApproveCommunityChecker(prNumber, commitSha string) error - GetAwaitingApprovalBuildLink(prNumber, commitSha string) (string, error) - TriggerMMPresubmitRuns(commitSha string, substitutions map[string]string) error +type Client struct { } -func NewCloudBuildService() CloudBuild { - var x cloudBuild = true - return x +func NewClient() *Client { + return &Client{} } diff --git a/.ci/magician/cmd/community_checker.go b/.ci/magician/cmd/community_checker.go index f1b8a943d08e..3630bbb323f1 100644 --- a/.ci/magician/cmd/community_checker.go +++ b/.ci/magician/cmd/community_checker.go @@ -12,17 +12,6 @@ import ( "github.com/spf13/cobra" ) -type ccGithub interface { - GetPullRequestAuthor(prNumber string) (string, error) - GetUserType(user string) github.UserType - RemoveLabel(prNumber string, label string) error - PostBuildStatus(prNumber string, title string, state string, targetUrl string, commitSha string) error -} - -type ccCloudbuild interface { - TriggerMMPresubmitRuns(commitSha 
string, substitutions map[string]string) error -} - // communityApprovalCmd represents the communityApproval command var communityApprovalCmd = &cobra.Command{ Use: "community-checker", @@ -63,13 +52,13 @@ var communityApprovalCmd = &cobra.Command{ baseBranch := args[5] fmt.Println("Base Branch: ", baseBranch) - gh := github.NewGithubService() - cb := cloudbuild.NewCloudBuildService() + gh := github.NewClient() + cb := cloudbuild.NewClient() execCommunityChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch, gh, cb) }, } -func execCommunityChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch string, gh ccGithub, cb ccCloudbuild) { +func execCommunityChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch string, gh GithubClient, cb CloudbuildClient) { substitutions := map[string]string{ "BRANCH_NAME": branchName, "_PR_NUMBER": prNumber, @@ -78,12 +67,13 @@ func execCommunityChecker(prNumber, commitSha, branchName, headRepoUrl, headBran "_BASE_BRANCH": baseBranch, } - author, err := gh.GetPullRequestAuthor(prNumber) + pullRequest, err := gh.GetPullRequest(prNumber) if err != nil { fmt.Println(err) os.Exit(1) } + author := pullRequest.User.Login authorUserType := gh.GetUserType(author) trusted := authorUserType == github.CoreContributorUserType || authorUserType == github.GooglerUserType diff --git a/.ci/magician/cmd/community_checker_test.go b/.ci/magician/cmd/community_checker_test.go index dec9682e50a2..f429a7910cb1 100644 --- a/.ci/magician/cmd/community_checker_test.go +++ b/.ci/magician/cmd/community_checker_test.go @@ -8,7 +8,11 @@ import ( func TestExecCommunityChecker_CoreContributorFlow(t *testing.T) { gh := &mockGithub{ - author: "core_author", + pullRequest: github.PullRequest{ + User: github.User{ + Login: "core_author", + }, + }, userType: github.CoreContributorUserType, calledMethods: make(map[string][][]any), } @@ -34,7 +38,11 @@ func TestExecCommunityChecker_CoreContributorFlow(t 
*testing.T) { func TestExecCommunityChecker_GooglerFlow(t *testing.T) { gh := &mockGithub{ - author: "googler_author", + pullRequest: github.PullRequest{ + User: github.User{ + Login: "googler_author", + }, + }, userType: github.GooglerUserType, calledMethods: make(map[string][][]any), firstReviewer: "reviewer1", @@ -61,7 +69,11 @@ func TestExecCommunityChecker_GooglerFlow(t *testing.T) { func TestExecCommunityChecker_AmbiguousUserFlow(t *testing.T) { gh := &mockGithub{ - author: "ambiguous_author", + pullRequest: github.PullRequest{ + User: github.User{ + Login: "ambiguous_author", + }, + }, userType: github.CommunityUserType, calledMethods: make(map[string][][]any), firstReviewer: github.GetRandomReviewer(), diff --git a/.ci/magician/cmd/generate_comment.go b/.ci/magician/cmd/generate_comment.go index c38b68009189..2d2aa39f3b73 100644 --- a/.ci/magician/cmd/generate_comment.go +++ b/.ci/magician/cmd/generate_comment.go @@ -12,23 +12,7 @@ import ( "github.com/spf13/cobra" ) -const allowBreakingChangesLabel = 4598495472 - -type gcGithub interface { - GetPullRequestLabelIDs(prNumber string) (map[int]struct{}, error) - PostBuildStatus(prNumber, title, state, targetURL, commitSha string) error - PostComment(prNumber, comment string) error -} - -type gcRunner interface { - GetCWD() string - Copy(src, dest string) error - RemoveAll(path string) error - PushDir(path string) error - PopDir() error - Run(name string, args, env []string) (string, error) - MustRun(name string, args, env []string) string -} +const allowBreakingChangesLabel = "override-breaking-change" type ProviderVersion string @@ -74,8 +58,8 @@ var generateCommentCmd = &cobra.Command{ commit := os.Getenv("COMMIT_SHA") fmt.Println("Commit SHA: ", commit) - pr := os.Getenv("PR_NUMBER") - fmt.Println("PR Number: ", pr) + prNumber := os.Getenv("PR_NUMBER") + fmt.Println("PR Number: ", prNumber) githubToken, ok := os.LookupEnv("GITHUB_TOKEN") if !ok { @@ -83,19 +67,19 @@ var generateCommentCmd = &cobra.Command{ 
os.Exit(1) } - gh := github.NewGithubService() + gh := github.NewClient() rnr, err := exec.NewRunner() if err != nil { fmt.Println("Error creating a runner: ", err) os.Exit(1) } - execGenerateComment(buildID, projectID, buildStep, commit, pr, githubToken, gh, rnr) + execGenerateComment(buildID, projectID, buildStep, commit, prNumber, githubToken, gh, rnr) }, } -func execGenerateComment(buildID, projectID, buildStep, commit, pr, githubToken string, gh gcGithub, r gcRunner) { - newBranch := "auto-pr-" + pr - oldBranch := "auto-pr-" + pr + "-old" +func execGenerateComment(buildID, projectID, buildStep, commit, prNumber, githubToken string, gh GithubClient, r ExecRunner) { + newBranch := "auto-pr-" + prNumber + oldBranch := "auto-pr-" + prNumber + "-old" wd := r.GetCWD() mmLocalPath := filepath.Join(wd, "..", "..") tpgRepoName := "terraform-provider-google" @@ -173,7 +157,7 @@ func execGenerateComment(buildID, projectID, buildStep, commit, pr, githubToken showBreakingChangesFailed = true } versionedBreakingChanges[repo.Version] = output - err = addLabels(diffProcessorPath, githubToken, pr, r) + err = addLabels(diffProcessorPath, githubToken, prNumber, r) if err != nil { fmt.Println("Error adding TPG labels to PR: ", err) } @@ -204,12 +188,20 @@ The breaking change detector crashed during execution. 
This is usually due to th if breakingChanges != "" { message += breakingChanges + "\n\n" - labels, err := gh.GetPullRequestLabelIDs(pr) + pullRequest, err := gh.GetPullRequest(prNumber) if err != nil { - fmt.Printf("Error getting pull request labels: %v\n", err) + fmt.Printf("Error getting pull request: %v\n", err) os.Exit(1) } - if _, ok := labels[allowBreakingChangesLabel]; !ok { + + breakingChangesAllowed := false + for _, label := range pullRequest.Labels { + if label.Name == allowBreakingChangesLabel { + breakingChangesAllowed = true + break + } + } + if !breakingChangesAllowed { breakingState = "failure" } } @@ -223,13 +215,13 @@ The breaking change detector crashed during execution. This is usually due to th } } - if err := gh.PostComment(pr, message); err != nil { - fmt.Printf("Error posting comment to PR %s: %v\n", pr, err) + if err := gh.PostComment(prNumber, message); err != nil { + fmt.Printf("Error posting comment to PR %s: %v\n", prNumber, err) } targetURL := fmt.Sprintf("https://console.cloud.google.com/cloud-build/builds;region=global/%s;step=%s?project=%s", buildID, buildStep, projectID) - if err := gh.PostBuildStatus(pr, "terraform-provider-breaking-change-test", breakingState, targetURL, commit); err != nil { - fmt.Printf("Error posting build status for pr %s commit %s: %v\n", pr, commit, err) + if err := gh.PostBuildStatus(prNumber, "terraform-provider-breaking-change-test", breakingState, targetURL, commit); err != nil { + fmt.Printf("Error posting build status for pr %s commit %s: %v\n", prNumber, commit, err) os.Exit(1) } @@ -239,7 +231,7 @@ The breaking change detector crashed during execution. 
This is usually due to th } if diffs := r.MustRun("git", []string{"diff", "HEAD", "origin/main", "tools/missing-test-detector"}, nil); diffs != "" { fmt.Printf("Found diffs in missing test detector:\n%s\nRunning tests.\n", diffs) - if err := testTools(mmLocalPath, tpgbLocalPath, pr, commit, buildID, buildStep, projectID, gh, r); err != nil { + if err := testTools(mmLocalPath, tpgbLocalPath, prNumber, commit, buildID, buildStep, projectID, gh, r); err != nil { fmt.Printf("Error testing tools in %s: %v\n", mmLocalPath, err) os.Exit(1) } @@ -250,7 +242,7 @@ The breaking change detector crashed during execution. This is usually due to th } } -func cloneAndDiff(repo Repository, oldBranch, newBranch, githubToken string, r gcRunner) (string, error) { +func cloneAndDiff(repo Repository, oldBranch, newBranch, githubToken string, r ExecRunner) (string, error) { // Clone the repo to the desired repo.Path. url := fmt.Sprintf("https://modular-magician:%s@github.com/modular-magician/%s", githubToken, repo.Name) if _, err := r.Run("git", []string{"clone", "-b", newBranch, url, repo.Path}, nil); err != nil { @@ -276,7 +268,7 @@ func cloneAndDiff(repo Repository, oldBranch, newBranch, githubToken string, r g } // Build the diff processor for tpg or tpgb -func buildDiffProcessor(diffProcessorPath, providerLocalPath, oldBranch, newBranch string, r gcRunner) error { +func buildDiffProcessor(diffProcessorPath, providerLocalPath, oldBranch, newBranch string, r ExecRunner) error { if err := r.PushDir(diffProcessorPath); err != nil { return err } @@ -291,7 +283,7 @@ func buildDiffProcessor(diffProcessorPath, providerLocalPath, oldBranch, newBran return r.PopDir() } -func computeBreakingChanges(diffProcessorPath string, r gcRunner) (string, error) { +func computeBreakingChanges(diffProcessorPath string, r ExecRunner) (string, error) { if err := r.PushDir(diffProcessorPath); err != nil { return "", err } @@ -302,11 +294,11 @@ func computeBreakingChanges(diffProcessorPath string, r gcRunner) 
(string, error return breakingChanges, r.PopDir() } -func addLabels(diffProcessorPath, githubToken, pr string, r gcRunner) error { +func addLabels(diffProcessorPath, githubToken, prNumber string, r ExecRunner) error { if err := r.PushDir(diffProcessorPath); err != nil { return err } - output, err := r.Run("bin/diff-processor", []string{"add-labels", pr}, []string{fmt.Sprintf("GITHUB_TOKEN=%s", githubToken)}) + output, err := r.Run("bin/diff-processor", []string{"add-labels", prNumber}, []string{fmt.Sprintf("GITHUB_TOKEN=%s", githubToken)}) fmt.Println(output) if err != nil { return err @@ -314,7 +306,7 @@ func addLabels(diffProcessorPath, githubToken, pr string, r gcRunner) error { return r.PopDir() } -func cleanDiffProcessor(diffProcessorPath string, r gcRunner) error { +func cleanDiffProcessor(diffProcessorPath string, r ExecRunner) error { for _, path := range []string{"old", "new", "bin"} { if err := r.RemoveAll(filepath.Join(diffProcessorPath, path)); err != nil { return err @@ -368,7 +360,7 @@ An ` + "`override-breaking-change`" + `label can be added to allow merging. // Run the missing test detector and return the results. // Returns an empty string unless there are missing tests. // Error will be nil unless an error occurs during setup. -func detectMissingTests(mmLocalPath, tpgbLocalPath, oldBranch string, r gcRunner) (string, error) { +func detectMissingTests(mmLocalPath, tpgbLocalPath, oldBranch string, r ExecRunner) (string, error) { tpgbLocalPathOld := tpgbLocalPath + "old" if err := r.Copy(tpgbLocalPath, tpgbLocalPathOld); err != nil { @@ -417,7 +409,7 @@ func detectMissingTests(mmLocalPath, tpgbLocalPath, oldBranch string, r gcRunner // Update the provider package name to the given name in the given path. // name should be either "old" or "new". 
-func updatePackageName(name, path string, r gcRunner) error { +func updatePackageName(name, path string, r ExecRunner) error { oldPackageName := "github.com/hashicorp/terraform-provider-google-beta" newPackageName := "google/provider/" + name fmt.Printf("Updating package name in %s from %s to %s\n", path, oldPackageName, newPackageName) @@ -438,7 +430,7 @@ func updatePackageName(name, path string, r gcRunner) error { // Run unit tests for the missing test detector and diff processor. // Report results using Github API. -func testTools(mmLocalPath, tpgbLocalPath, pr, commit, buildID, buildStep, projectID string, gh gcGithub, r gcRunner) error { +func testTools(mmLocalPath, tpgbLocalPath, prNumber, commit, buildID, buildStep, projectID string, gh GithubClient, r ExecRunner) error { missingTestDetectorPath := filepath.Join(mmLocalPath, "tools", "missing-test-detector") r.PushDir(missingTestDetectorPath) if _, err := r.Run("go", []string{"mod", "tidy"}, nil); err != nil { @@ -451,7 +443,7 @@ func testTools(mmLocalPath, tpgbLocalPath, pr, commit, buildID, buildStep, proje state = "failure" } targetURL := fmt.Sprintf("https://console.cloud.google.com/cloud-build/builds;region=global/%s;step=%s?project=%s", buildID, buildStep, projectID) - if err := gh.PostBuildStatus(pr, "unit-tests-missing-test-detector", state, targetURL, commit); err != nil { + if err := gh.PostBuildStatus(prNumber, "unit-tests-missing-test-detector", state, targetURL, commit); err != nil { return err } return r.PopDir() diff --git a/.ci/magician/cmd/interfaces.go b/.ci/magician/cmd/interfaces.go new file mode 100644 index 000000000000..ef99df1723bd --- /dev/null +++ b/.ci/magician/cmd/interfaces.go @@ -0,0 +1,34 @@ +package cmd + +import ( + "magician/github" +) + +type GithubClient interface { + GetPullRequest(prNumber string) (github.PullRequest, error) + GetPullRequestRequestedReviewer(prNumber string) (string, error) + GetPullRequestPreviousAssignedReviewers(prNumber string) ([]string, error) + 
GetUserType(user string) github.UserType + PostBuildStatus(prNumber, title, state, targetURL, commitSha string) error + PostComment(prNumber, comment string) error + RequestPullRequestReviewer(prNumber, assignee string) error + AddLabel(prNumber, label string) error + RemoveLabel(prNumber, label string) error + CreateWorkflowDispatchEvent(workflowFileName string, inputs map[string]any) error +} + +type CloudbuildClient interface { + ApproveCommunityChecker(prNumber, commitSha string) error + GetAwaitingApprovalBuildLink(prNumber, commitSha string) (string, error) + TriggerMMPresubmitRuns(commitSha string, substitutions map[string]string) error +} + +type ExecRunner interface { + GetCWD() string + Copy(src, dest string) error + RemoveAll(path string) error + PushDir(path string) error + PopDir() error + Run(name string, args, env []string) (string, error) + MustRun(name string, args, env []string) string +} diff --git a/.ci/magician/cmd/membership_checker.go b/.ci/magician/cmd/membership_checker.go index df837ff81e33..b5fcd8b85160 100644 --- a/.ci/magician/cmd/membership_checker.go +++ b/.ci/magician/cmd/membership_checker.go @@ -12,23 +12,6 @@ import ( "github.com/spf13/cobra" ) -type mcGithub interface { - GetPullRequestAuthor(prNumber string) (string, error) - GetUserType(user string) github.UserType - GetPullRequestRequestedReviewer(prNumber string) (string, error) - GetPullRequestPreviousAssignedReviewers(prNumber string) ([]string, error) - RequestPullRequestReviewer(prNumber string, reviewer string) error - PostComment(prNumber string, comment string) error - AddLabel(prNumber string, label string) error - PostBuildStatus(prNumber string, title string, state string, targetUrl string, commitSha string) error -} - -type mcCloudbuild interface { - ApproveCommunityChecker(prNumber, commitSha string) error - GetAwaitingApprovalBuildLink(prNumber, commitSha string) (string, error) - TriggerMMPresubmitRuns(commitSha string, substitutions map[string]string) error -} 
- // membershipCheckerCmd represents the membershipChecker command var membershipCheckerCmd = &cobra.Command{ Use: "membership-checker", @@ -77,13 +60,13 @@ var membershipCheckerCmd = &cobra.Command{ baseBranch := args[5] fmt.Println("Base Branch: ", baseBranch) - gh := github.NewGithubService() - cb := cloudbuild.NewCloudBuildService() + gh := github.NewClient() + cb := cloudbuild.NewClient() execMembershipChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch, gh, cb) }, } -func execMembershipChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch string, gh mcGithub, cb mcCloudbuild) { +func execMembershipChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch string, gh GithubClient, cb CloudbuildClient) { substitutions := map[string]string{ "BRANCH_NAME": branchName, "_PR_NUMBER": prNumber, @@ -92,12 +75,13 @@ func execMembershipChecker(prNumber, commitSha, branchName, headRepoUrl, headBra "_BASE_BRANCH": baseBranch, } - author, err := gh.GetPullRequestAuthor(prNumber) + pullRequest, err := gh.GetPullRequest(prNumber) if err != nil { fmt.Println(err) os.Exit(1) } + author := pullRequest.User.Login authorUserType := gh.GetUserType(author) trusted := authorUserType == github.CoreContributorUserType || authorUserType == github.GooglerUserType @@ -116,7 +100,7 @@ func execMembershipChecker(prNumber, commitSha, branchName, headRepoUrl, headBra os.Exit(1) } - reviewersToRequest, newPrimaryReviewer := github.ChooseReviewers(firstRequestedReviewer, previouslyInvolvedReviewers) + reviewersToRequest, newPrimaryReviewer := github.ChooseCoreReviewers(firstRequestedReviewer, previouslyInvolvedReviewers) for _, reviewer := range reviewersToRequest { err = gh.RequestPullRequestReviewer(prNumber, reviewer) diff --git a/.ci/magician/cmd/membership_checker_test.go b/.ci/magician/cmd/membership_checker_test.go index b79f3fd13699..184c9ec5d8eb 100644 --- a/.ci/magician/cmd/membership_checker_test.go +++ 
b/.ci/magician/cmd/membership_checker_test.go @@ -9,7 +9,11 @@ import ( func TestExecMembershipChecker_CoreContributorFlow(t *testing.T) { gh := &mockGithub{ - author: "core_author", + pullRequest: github.PullRequest{ + User: github.User{ + Login: "core_author", + }, + }, userType: github.CoreContributorUserType, calledMethods: make(map[string][][]any), } @@ -43,7 +47,11 @@ func TestExecMembershipChecker_CoreContributorFlow(t *testing.T) { func TestExecMembershipChecker_GooglerFlow(t *testing.T) { gh := &mockGithub{ - author: "googler_author", + pullRequest: github.PullRequest{ + User: github.User{ + Login: "googler_author", + }, + }, userType: github.GooglerUserType, calledMethods: make(map[string][][]any), firstReviewer: "reviewer1", @@ -87,7 +95,11 @@ func TestExecMembershipChecker_GooglerFlow(t *testing.T) { func TestExecMembershipChecker_AmbiguousUserFlow(t *testing.T) { gh := &mockGithub{ - author: "ambiguous_author", + pullRequest: github.PullRequest{ + User: github.User{ + Login: "ambiguous_author", + }, + }, userType: github.CommunityUserType, calledMethods: make(map[string][][]any), firstReviewer: github.GetRandomReviewer(), @@ -139,7 +151,11 @@ func TestExecMembershipChecker_AmbiguousUserFlow(t *testing.T) { func TestExecMembershipChecker_CommentForNewPrimaryReviewer(t *testing.T) { gh := &mockGithub{ - author: "googler_author", + pullRequest: github.PullRequest{ + User: github.User{ + Login: "googler_author", + }, + }, userType: github.GooglerUserType, calledMethods: make(map[string][][]any), firstReviewer: "", diff --git a/.ci/magician/cmd/mock_github_test.go b/.ci/magician/cmd/mock_github_test.go index dbd20f582a32..49bd98b624bb 100644 --- a/.ci/magician/cmd/mock_github_test.go +++ b/.ci/magician/cmd/mock_github_test.go @@ -3,16 +3,16 @@ package cmd import "magician/github" type mockGithub struct { - author string + pullRequest github.PullRequest userType github.UserType firstReviewer string previousReviewers []string calledMethods map[string][][]any 
} -func (m *mockGithub) GetPullRequestAuthor(prNumber string) (string, error) { - m.calledMethods["GetPullRequestAuthor"] = append(m.calledMethods["GetPullRequestAuthor"], []any{prNumber}) - return m.author, nil +func (m *mockGithub) GetPullRequest(prNumber string) (github.PullRequest, error) { + m.calledMethods["GetPullRequest"] = append(m.calledMethods["GetPullRequest"], []any{prNumber}) + return m.pullRequest, nil } func (m *mockGithub) GetUserType(user string) github.UserType { @@ -59,8 +59,3 @@ func (m *mockGithub) CreateWorkflowDispatchEvent(workflowFileName string, inputs m.calledMethods["CreateWorkflowDispatchEvent"] = append(m.calledMethods["CreateWorkflowDispatchEvent"], []any{workflowFileName, inputs}) return nil } - -func (m *mockGithub) GetPullRequestLabelIDs(prNumber string) (map[int]struct{}, error) { - m.calledMethods["GetPullRequestLabelIDs"] = append(m.calledMethods["GetPullRequestLabelIDs"], []any{prNumber}) - return nil, nil -} diff --git a/.ci/magician/cmd/test_tgc.go b/.ci/magician/cmd/test_tgc.go index 109cd1fea32b..a5610daaedb9 100644 --- a/.ci/magician/cmd/test_tgc.go +++ b/.ci/magician/cmd/test_tgc.go @@ -21,7 +21,7 @@ var testTGCCmd = &cobra.Command{ commit := os.Getenv("COMMIT_SHA") pr := os.Getenv("PR_NUMBER") - gh := github.NewGithubService() + gh := github.NewClient() execTestTGC(commit, pr, gh) }, diff --git a/.ci/magician/cmd/test_tpg.go b/.ci/magician/cmd/test_tpg.go index b5543d18899a..75398c7a4281 100644 --- a/.ci/magician/cmd/test_tpg.go +++ b/.ci/magician/cmd/test_tpg.go @@ -27,7 +27,7 @@ var testTPGCmd = &cobra.Command{ commit := os.Getenv("COMMIT_SHA") pr := os.Getenv("PR_NUMBER") - gh := github.NewGithubService() + gh := github.NewClient() execTestTPG(version, commit, pr, gh) }, diff --git a/.ci/magician/exec/runner.go b/.ci/magician/exec/runner.go index c9423237b285..3bc21e23116d 100644 --- a/.ci/magician/exec/runner.go +++ b/.ci/magician/exec/runner.go @@ -12,47 +12,36 @@ import ( cp "github.com/otiai10/copy" ) -type 
actualRunner struct { +type Runner struct { cwd string dirStack *list.List } -type Runner interface { - GetCWD() string - Copy(src, dest string) error - RemoveAll(path string) error - PushDir(path string) error - PopDir() error - WriteFile(name, data string) error - Run(name string, args, env []string) (string, error) - MustRun(name string, args, env []string) string -} - -func NewRunner() (Runner, error) { +func NewRunner() (*Runner, error) { wd, err := os.Getwd() if err != nil { return nil, err } - return &actualRunner{ + return &Runner{ cwd: wd, dirStack: list.New(), }, nil } -func (ar *actualRunner) GetCWD() string { +func (ar *Runner) GetCWD() string { return ar.cwd } -func (ar *actualRunner) Copy(src, dest string) error { +func (ar *Runner) Copy(src, dest string) error { return cp.Copy(ar.abs(src), ar.abs(dest)) } -func (ar *actualRunner) RemoveAll(path string) error { +func (ar *Runner) RemoveAll(path string) error { return os.RemoveAll(ar.abs(path)) } // PushDir changes the directory for the runner to the desired path and saves the previous directory in the stack. -func (ar *actualRunner) PushDir(path string) error { +func (ar *Runner) PushDir(path string) error { if ar.dirStack == nil { return errors.New("attempted to push dir, but stack was nil") } @@ -62,7 +51,7 @@ func (ar *actualRunner) PushDir(path string) error { } // PopDir removes the most recently added directory from the stack and changes front to it. 
-func (ar *actualRunner) PopDir() error { +func (ar *Runner) PopDir() error { if ar.dirStack == nil || ar.dirStack.Len() == 0 { return errors.New("attempted to pop dir, but stack was nil or empty") } @@ -75,11 +64,11 @@ func (ar *actualRunner) PopDir() error { return nil } -func (ar *actualRunner) WriteFile(name, data string) error { +func (ar *Runner) WriteFile(name, data string) error { return os.WriteFile(ar.abs(name), []byte(data), 0644) } -func (ar *actualRunner) Run(name string, args, env []string) (string, error) { +func (ar *Runner) Run(name string, args, env []string) (string, error) { cmd := exec.Command(name, args...) cmd.Dir = ar.cwd cmd.Env = append(os.Environ(), env...) @@ -91,7 +80,7 @@ func (ar *actualRunner) Run(name string, args, env []string) (string, error) { return string(out), nil } -func (ar *actualRunner) MustRun(name string, args, env []string) string { +func (ar *Runner) MustRun(name string, args, env []string) string { out, err := ar.Run(name, args, env) if err != nil { log.Fatal(err) @@ -99,7 +88,7 @@ func (ar *actualRunner) MustRun(name string, args, env []string) string { return out } -func (ar *actualRunner) abs(path string) string { +func (ar *Runner) abs(path string) string { if !filepath.IsAbs(path) { return filepath.Join(ar.cwd, path) } diff --git a/.ci/magician/github/get.go b/.ci/magician/github/get.go index 48f3cdab1a3e..30df68d4db4a 100644 --- a/.ci/magician/github/get.go +++ b/.ci/magician/github/get.go @@ -5,24 +5,35 @@ import ( utils "magician/utility" ) -func (gh *github) GetPullRequestAuthor(prNumber string) (string, error) { +type User struct { + Login string `json:"login"` +} + +type Label struct { + Name string `json:"name"` +} + +type PullRequest struct { + User struct { + Login string `json:"login"` + } `json:"user"` + Labels []Label `json:"labels"` +} + +func (gh *Client) GetPullRequest(prNumber string) (PullRequest, error) { url := 
fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/issues/%s", prNumber) - var pullRequest struct { - User struct { - Login string `json:"login"` - } `json:"user"` - } + var pullRequest PullRequest _, err := utils.RequestCall(url, "GET", gh.token, &pullRequest, nil) if err != nil { - return "", err + return pullRequest, err } - return pullRequest.User.Login, nil + return pullRequest, nil } -func (gh *github) GetPullRequestRequestedReviewer(prNumber string) (string, error) { +func (gh *Client) GetPullRequestRequestedReviewer(prNumber string) (string, error) { url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/pulls/%s/requested_reviewers", prNumber) var requestedReviewers struct { @@ -43,7 +54,7 @@ func (gh *github) GetPullRequestRequestedReviewer(prNumber string) (string, erro return requestedReviewers.Users[0].Login, nil } -func (gh *github) GetPullRequestPreviousAssignedReviewers(prNumber string) ([]string, error) { +func (gh *Client) GetPullRequestPreviousAssignedReviewers(prNumber string) ([]string, error) { url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/pulls/%s/reviews", prNumber) var reviews []struct { @@ -69,24 +80,3 @@ func (gh *github) GetPullRequestPreviousAssignedReviewers(prNumber string) ([]st return result, nil } - -func (gh *github) GetPullRequestLabelIDs(prNumber string) (map[int]struct{}, error) { - url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/pulls/%s/reviews", prNumber) - - var labels []struct { - Label struct { - ID int `json:"id"` - } `json:"label"` - } - - if _, err := utils.RequestCall(url, "GET", gh.token, &labels, nil); err != nil { - return nil, err - } - - var result map[int]struct{} - for _, label := range labels { - result[label.Label.ID] = struct{}{} - } - - return result, nil -} diff --git a/.ci/magician/github/init.go b/.ci/magician/github/init.go index 3822a0274c5f..ff63ae2c1ef5 100644 --- 
a/.ci/magician/github/init.go +++ b/.ci/magician/github/init.go @@ -5,31 +5,17 @@ import ( "os" ) -// GithubService represents the service for GitHub interactions. -type github struct { +// Client for GitHub interactions. +type Client struct { token string } -type GithubService interface { - GetPullRequestAuthor(prNumber string) (string, error) - GetPullRequestRequestedReviewer(prNumber string) (string, error) - GetPullRequestPreviousAssignedReviewers(prNumber string) ([]string, error) - GetPullRequestLabelIDs(prNumber string) (map[int]struct{}, error) - GetUserType(user string) UserType - PostBuildStatus(prNumber, title, state, targetURL, commitSha string) error - PostComment(prNumber, comment string) error - RequestPullRequestReviewer(prNumber, assignee string) error - AddLabel(prNumber, label string) error - RemoveLabel(prNumber, label string) error - CreateWorkflowDispatchEvent(workflowFileName string, inputs map[string]any) error -} - -func NewGithubService() GithubService { +func NewClient() *Client { githubToken, ok := os.LookupEnv("GITHUB_TOKEN") if !ok { fmt.Println("Did not provide GITHUB_TOKEN environment variable") os.Exit(1) } - return &github{token: githubToken} + return &Client{token: githubToken} } diff --git a/.ci/magician/github/membership.go b/.ci/magician/github/membership.go index 98af81827746..adac576f2f68 100644 --- a/.ci/magician/github/membership.go +++ b/.ci/magician/github/membership.go @@ -56,7 +56,7 @@ func (ut UserType) String() string { } } -func (gh *github) GetUserType(user string) UserType { +func (gh *Client) GetUserType(user string) UserType { if isTeamMember(user, gh.token) { fmt.Println("User is a team member") return CoreContributorUserType diff --git a/.ci/magician/github/reviewer_assignment.go b/.ci/magician/github/reviewer_assignment.go index b3e968145d9f..820bb76a4f81 100644 --- a/.ci/magician/github/reviewer_assignment.go +++ b/.ci/magician/github/reviewer_assignment.go @@ -14,7 +14,7 @@ var ( ) // Returns a list of users 
to request review from, as well as a new primary reviewer if this is the first run. -func ChooseReviewers(firstRequestedReviewer string, previouslyInvolvedReviewers []string) (reviewersToRequest []string, newPrimaryReviewer string) { +func ChooseCoreReviewers(firstRequestedReviewer string, previouslyInvolvedReviewers []string) (reviewersToRequest []string, newPrimaryReviewer string) { hasPrimaryReviewer := false newPrimaryReviewer = "" diff --git a/.ci/magician/github/reviewer_assignment_test.go b/.ci/magician/github/reviewer_assignment_test.go index b96ac59a7389..779d308cfb22 100644 --- a/.ci/magician/github/reviewer_assignment_test.go +++ b/.ci/magician/github/reviewer_assignment_test.go @@ -9,7 +9,7 @@ import ( "golang.org/x/exp/slices" ) -func TestChooseReviewers(t *testing.T) { +func TestChooseCoreReviewers(t *testing.T) { cases := map[string]struct { FirstRequestedReviewer string PreviouslyInvolvedReviewers []string @@ -57,7 +57,7 @@ func TestChooseReviewers(t *testing.T) { tc := tc t.Run(tn, func(t *testing.T) { t.Parallel() - reviewers, primaryReviewer := ChooseReviewers(tc.FirstRequestedReviewer, tc.PreviouslyInvolvedReviewers) + reviewers, primaryReviewer := ChooseCoreReviewers(tc.FirstRequestedReviewer, tc.PreviouslyInvolvedReviewers) if tc.ExpectPrimaryReviewer && primaryReviewer == "" { t.Error("wanted primary reviewer to be returned; got none") } diff --git a/.ci/magician/github/set.go b/.ci/magician/github/set.go index 90f116f673fb..e1575a4e505c 100644 --- a/.ci/magician/github/set.go +++ b/.ci/magician/github/set.go @@ -6,7 +6,7 @@ import ( "net/http" ) -func (gh *github) PostBuildStatus(prNumber, title, state, targetURL, commitSha string) error { +func (gh *Client) PostBuildStatus(prNumber, title, state, targetURL, commitSha string) error { url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/statuses/%s", commitSha) postBody := map[string]string{ @@ -25,7 +25,7 @@ func (gh *github) PostBuildStatus(prNumber, title, 
state, targetURL, commitSha s return nil } -func (gh *github) PostComment(prNumber, comment string) error { +func (gh *Client) PostComment(prNumber, comment string) error { url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/issues/%s/comments", prNumber) body := map[string]string{ @@ -46,7 +46,7 @@ func (gh *github) PostComment(prNumber, comment string) error { return nil } -func (gh *github) RequestPullRequestReviewer(prNumber, assignee string) error { +func (gh *Client) RequestPullRequestReviewer(prNumber, assignee string) error { url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/pulls/%s/requested_reviewers", prNumber) body := map[string][]string{ @@ -68,7 +68,7 @@ func (gh *github) RequestPullRequestReviewer(prNumber, assignee string) error { return nil } -func (gh *github) AddLabel(prNumber, label string) error { +func (gh *Client) AddLabel(prNumber, label string) error { url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/issues/%s/labels", prNumber) body := map[string][]string{ @@ -84,7 +84,7 @@ func (gh *github) AddLabel(prNumber, label string) error { } -func (gh *github) RemoveLabel(prNumber, label string) error { +func (gh *Client) RemoveLabel(prNumber, label string) error { url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/issues/%s/labels/%s", prNumber, label) _, err := utils.RequestCall(url, "DELETE", gh.token, nil, nil) @@ -95,7 +95,7 @@ func (gh *github) RemoveLabel(prNumber, label string) error { return nil } -func (gh *github) CreateWorkflowDispatchEvent(workflowFileName string, inputs map[string]any) error { +func (gh *Client) CreateWorkflowDispatchEvent(workflowFileName string, inputs map[string]any) error { url := fmt.Sprintf("https://api.github.com/repos/GoogleCloudPlatform/magic-modules/actions/workflows/%s/dispatches", workflowFileName) resp, err := utils.RequestCall(url, "POST", gh.token, nil, map[string]any{ 
"ref": "main", diff --git a/.ci/magician/main.go b/.ci/magician/main.go index 7cf107ae4fbc..41c664080c04 100644 --- a/.ci/magician/main.go +++ b/.ci/magician/main.go @@ -1,6 +1,5 @@ /* Copyright © 2023 NAME HERE - */ package main From dadb91b07109f0cc2ac9e08822424bac8f8e8f92 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Fri, 8 Dec 2023 13:50:56 -0600 Subject: [PATCH 38/44] Upgrade DCL to v1.59.0 (#9565) --- mmv1/third_party/terraform/go.mod.erb | 2 +- mmv1/third_party/terraform/go.sum | 2 ++ .../recaptchaenterprise/samples/waf.key.json | 21 ++++++++++++++ .../recaptchaenterprise/samples/waf_key.yaml | 22 ++++++++++++++ tpgtools/go.mod | 2 +- tpgtools/go.sum | 4 +++ .../samples/nodepool/basic.tf.tmpl | 4 +++ .../samples/nodepool/basic_update.tf.tmpl | 4 +++ .../samples/nodepool/beta_basic.tf.tmpl | 4 +++ .../nodepool/beta_basic_update.tf.tmpl | 4 +++ tpgtools/overrides/orgpolicy/beta/policy.yaml | 8 +++-- .../samples/policy/dry_run_spec.tf.tmpl | 29 +++++++++++++++++++ .../samples/policy/dry_run_spec.yaml | 3 ++ 13 files changed, 105 insertions(+), 4 deletions(-) create mode 100755 tpgtools/api/recaptchaenterprise/samples/waf.key.json create mode 100755 tpgtools/api/recaptchaenterprise/samples/waf_key.yaml create mode 100644 tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.tf.tmpl create mode 100644 tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.yaml diff --git a/mmv1/third_party/terraform/go.mod.erb b/mmv1/third_party/terraform/go.mod.erb index e887f11fecab..4a7de9491de0 100644 --- a/mmv1/third_party/terraform/go.mod.erb +++ b/mmv1/third_party/terraform/go.mod.erb @@ -4,7 +4,7 @@ go 1.20 require ( cloud.google.com/go/bigtable v1.19.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.57.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.59.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.1 github.com/dnaeon/go-vcr v1.0.1 diff --git a/mmv1/third_party/terraform/go.sum 
b/mmv1/third_party/terraform/go.sum index 1644657b191a..62430b733c77 100644 --- a/mmv1/third_party/terraform/go.sum +++ b/mmv1/third_party/terraform/go.sum @@ -17,6 +17,8 @@ cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/GoogleCloudPlatform/declarative-resource-client-library v1.57.0 h1:Rz/Jlnt195m9B8CJPQejuTbXaPCoB1w1QYQjD4oKHMk= github.com/GoogleCloudPlatform/declarative-resource-client-library v1.57.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.59.0 h1:jL4ac+IUrVftmfduFslaMXWj9ENuiXEiwZFw3U5ikUA= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.59.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= diff --git a/tpgtools/api/recaptchaenterprise/samples/waf.key.json b/tpgtools/api/recaptchaenterprise/samples/waf.key.json new file mode 100755 index 000000000000..1e9f5c12750f --- /dev/null +++ b/tpgtools/api/recaptchaenterprise/samples/waf.key.json @@ -0,0 +1,21 @@ +{ + "displayName": "display-name-one", + "project": "{{project}}", + "webSettings": { + "allowAllDomains": true, + "allowedDomains": [], + "integrationType": "INVISIBLE", + "challengeSecurityPreference": "USABILITY" + }, + "wafSettings": { + "wafFeature": "CHALLENGE_PAGE", + "wafService": "CA" + }, + "testingOptions": { + "testingScore": 0.5, + "testingChallenge": "NOCAPTCHA" + }, + "labels": { + "label-one": "value-one" + } +} diff --git a/tpgtools/api/recaptchaenterprise/samples/waf_key.yaml b/tpgtools/api/recaptchaenterprise/samples/waf_key.yaml new file mode 100755 index 
000000000000..db726a20f886 --- /dev/null +++ b/tpgtools/api/recaptchaenterprise/samples/waf_key.yaml @@ -0,0 +1,22 @@ +# Copyright 2023 Google LLC. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +name: waf_key +description: A basic test of recaptcha enterprise key that includes WAF settings +type: key +versions: +- ga +resource: samples/waf.key.json +variables: +- name: project + type: project diff --git a/tpgtools/go.mod b/tpgtools/go.mod index b8256b7777fc..f983fe37b2ff 100644 --- a/tpgtools/go.mod +++ b/tpgtools/go.mod @@ -4,7 +4,7 @@ go 1.20 require ( bitbucket.org/creachadair/stringset v0.0.11 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.57.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.59.0 github.com/golang/glog v1.1.2 github.com/hashicorp/hcl v1.0.0 github.com/kylelemons/godebug v1.1.0 diff --git a/tpgtools/go.sum b/tpgtools/go.sum index c38592fc1e81..3fa5dd2136d1 100644 --- a/tpgtools/go.sum +++ b/tpgtools/go.sum @@ -8,6 +8,10 @@ cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2Aawl github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/GoogleCloudPlatform/declarative-resource-client-library v1.57.0 h1:Rz/Jlnt195m9B8CJPQejuTbXaPCoB1w1QYQjD4oKHMk= github.com/GoogleCloudPlatform/declarative-resource-client-library v1.57.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= 
+github.com/GoogleCloudPlatform/declarative-resource-client-library v1.58.0 h1:BePRfJS3N4ZNUzn+Z5vKMthuoitcvmD7Yw14X8BM60c= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.58.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.59.0 h1:jL4ac+IUrVftmfduFslaMXWj9ENuiXEiwZFw3U5ikUA= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.59.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= diff --git a/tpgtools/overrides/containerazure/samples/nodepool/basic.tf.tmpl b/tpgtools/overrides/containerazure/samples/nodepool/basic.tf.tmpl index 8ea45ee3f996..90b33a363a31 100644 --- a/tpgtools/overrides/containerazure/samples/nodepool/basic.tf.tmpl +++ b/tpgtools/overrides/containerazure/samples/nodepool/basic.tf.tmpl @@ -73,6 +73,10 @@ resource "google_container_azure_node_pool" "primary" { owner = "mmv2" } + labels = { + key_one = "label_one" + } + vm_size = "Standard_DS2_v2" } diff --git a/tpgtools/overrides/containerazure/samples/nodepool/basic_update.tf.tmpl b/tpgtools/overrides/containerazure/samples/nodepool/basic_update.tf.tmpl index 2786c74963b7..267a053c5b45 100644 --- a/tpgtools/overrides/containerazure/samples/nodepool/basic_update.tf.tmpl +++ b/tpgtools/overrides/containerazure/samples/nodepool/basic_update.tf.tmpl @@ -74,6 +74,10 @@ resource "google_container_azure_node_pool" "primary" { owner = "mmv2" } + labels = { + key_two = "label_two" + } + vm_size = "Standard_DS2_v2" } diff --git a/tpgtools/overrides/containerazure/samples/nodepool/beta_basic.tf.tmpl b/tpgtools/overrides/containerazure/samples/nodepool/beta_basic.tf.tmpl index 
4967311d5107..8dc605998b2c 100644 --- a/tpgtools/overrides/containerazure/samples/nodepool/beta_basic.tf.tmpl +++ b/tpgtools/overrides/containerazure/samples/nodepool/beta_basic.tf.tmpl @@ -77,6 +77,10 @@ resource "google_container_azure_node_pool" "primary" { owner = "mmv2" } + labels = { + key_one = "label_one" + } + vm_size = "Standard_DS2_v2" image_type = "ubuntu" diff --git a/tpgtools/overrides/containerazure/samples/nodepool/beta_basic_update.tf.tmpl b/tpgtools/overrides/containerazure/samples/nodepool/beta_basic_update.tf.tmpl index 2156af0dc662..bce21faa8001 100644 --- a/tpgtools/overrides/containerazure/samples/nodepool/beta_basic_update.tf.tmpl +++ b/tpgtools/overrides/containerazure/samples/nodepool/beta_basic_update.tf.tmpl @@ -78,6 +78,10 @@ resource "google_container_azure_node_pool" "primary" { owner = "mmv2" } + labels = { + key_two = "label_two" + } + vm_size = "Standard_DS2_v2" image_type = "ubuntu" diff --git a/tpgtools/overrides/orgpolicy/beta/policy.yaml b/tpgtools/overrides/orgpolicy/beta/policy.yaml index e31fa2fc2c0a..08ff9e6b371f 100644 --- a/tpgtools/overrides/orgpolicy/beta/policy.yaml +++ b/tpgtools/overrides/orgpolicy/beta/policy.yaml @@ -7,5 +7,9 @@ field: spec.rules.deny_all - type: ENUM_BOOL field: spec.rules.enforce -- type: EXCLUDE - field: dry_run_spec +- type: ENUM_BOOL + field: dry_run_spec.rules.allow_all +- type: ENUM_BOOL + field: dry_run_spec.rules.deny_all +- type: ENUM_BOOL + field: dry_run_spec.rules.enforce \ No newline at end of file diff --git a/tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.tf.tmpl b/tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.tf.tmpl new file mode 100644 index 000000000000..1c66f04c389b --- /dev/null +++ b/tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.tf.tmpl @@ -0,0 +1,29 @@ +resource "google_org_policy_custom_constraint" "constraint" { + name = "custom.disableGkeAutoUpgrade%{random_suffix}" + parent = "organizations/{{org_id}}" + display_name = "Disable GKE auto 
upgrade" + description = "Only allow GKE NodePool resource to be created or updated if AutoUpgrade is not enabled where this custom constraint is enforced." + + action_type = "ALLOW" + condition = "resource.management.autoUpgrade == false" + method_types = ["CREATE"] + resource_types = ["container.googleapis.com/NodePool"] +} + +resource "google_org_policy_policy" "primary" { + name = "organizations/{{org_id}}/policies/${google_org_policy_custom_constraint.constraint.name}" + parent = "organizations/{{org_id}}" + + spec { + rules { + enforce = "FALSE" + } + } + dry_run_spec { + inherit_from_parent = false + reset = false + rules { + enforce = "FALSE" + } + } +} diff --git a/tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.yaml b/tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.yaml new file mode 100644 index 000000000000..48afc1aedb93 --- /dev/null +++ b/tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.yaml @@ -0,0 +1,3 @@ +variables: + - name: "org_id" + type: "org_id" \ No newline at end of file From 1c04820a4aa5b0cdae510ddeb546528c61ff18ea Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Fri, 8 Dec 2023 19:54:37 +0000 Subject: [PATCH 39/44] Add missing attribute reference and closing bracket to test (#9600) --- .../vmwareengine/resource_vmwareengine_private_cloud_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go index 40b0cde0a558..05195787f1a2 100644 --- a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go +++ b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go @@ -102,8 +102,8 @@ data "google_vmwareengine_private_cloud" "ds" { # NSX and Vcenter Credentials are child datasources of PC and are 
included in the PC test due to the high deployment time involved in the Creation and deletion of a PC data "google_vmwareengine_nsx_credentials" "nsx-ds" { - parent = google_vmwareengine_private_cloud.vmw-engine-pc - + parent = google_vmwareengine_private_cloud.vmw-engine-pc.id +} data "google_vmwareengine_vcenter_credentials" "vcenter-ds" { parent = google_vmwareengine_private_cloud.vmw-engine-pc.id } From 680968fde8a58272df843d8c2dc6867ac8a176b7 Mon Sep 17 00:00:00 2001 From: kautikdk <144651627+kautikdk@users.noreply.github.com> Date: Fri, 8 Dec 2023 20:10:42 +0000 Subject: [PATCH 40/44] Add rpo attribute to 'google_storage_bucket' resource (#9574) * Add rpo attribute for 'google_storage_bucket' resource * Modifies test-case, markdown and Fixes indent issues * Modifies rpo markdown and dualLocation_rpo testcase. --- .../storage/resource_storage_bucket.go.erb | 26 +++- .../resource_storage_bucket_test.go.erb | 117 ++++++++++++++++++ .../docs/r/storage_bucket.html.markdown | 2 + 3 files changed, 144 insertions(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb index ca2ca5749529..8ea3385195cb 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb @@ -470,6 +470,12 @@ func ResourceStorageBucket() *schema.Resource { }, Description: `The bucket's custom location configuration, which specifies the individual regions that comprise a dual-region bucket. If the bucket is designated a single or multi-region, the parameters are empty.`, }, + "rpo": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Specifies the RPO setting of bucket. If set 'ASYNC_TURBO', The Turbo Replication will be enabled for the dual-region bucket. Value 'DEFAULT' will set RPO setting to default. 
Turbo Replication is only for buckets in dual-regions.See the docs for more details.`, + }, "public_access_prevention": { Type: schema.TypeString, Optional: true, @@ -602,6 +608,10 @@ func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error sb.CustomPlacementConfig = expandBucketCustomPlacementConfig(v.([]interface{})) } + if v, ok := d.GetOk("rpo"); ok{ + sb.Rpo = v.(string) + } + var res *storage.Bucket err = transport_tpg.Retry(transport_tpg.RetryOptions{ @@ -765,6 +775,14 @@ func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error sb.IamConfiguration = expandIamConfiguration(d) } + if d.HasChange("rpo") { + if v,ok := d.GetOk("rpo"); ok{ + sb.Rpo = v.(string) + } else { + sb.NullFields = append(sb.NullFields, "Rpo") + } + } + res, err := config.NewStorageClient(userAgent).Buckets.Patch(d.Get("name").(string), sb).Do() if err != nil { return err @@ -1691,7 +1709,13 @@ func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res if err := d.Set("custom_placement_config", flattenBucketCustomPlacementConfig(res.CustomPlacementConfig)); err != nil { return fmt.Errorf("Error setting custom_placement_config: %s", err) } - + // Needs to hide rpo field for single-region buckets. + // Check the Rpo field from API response to determine whether bucket is in single region config or not. 
+ if res.Rpo != "" { + if err := d.Set("rpo", res.Rpo); err != nil { + return fmt.Errorf("Error setting RPO setting : %s", err) + } + } if res.IamConfiguration != nil && res.IamConfiguration.UniformBucketLevelAccess != nil { if err := d.Set("uniform_bucket_level_access", res.IamConfiguration.UniformBucketLevelAccess.Enabled); err != nil { return fmt.Errorf("Error setting uniform_bucket_level_access: %s", err) diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb index 09c92686525d..7c686a2878cd 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb @@ -178,6 +178,98 @@ func TestAccStorageBucket_dualLocation(t *testing.T) { }) } +func TestAccStorageBucket_dualLocation_rpo(t *testing.T) { + t.Parallel() + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_dualLocation(bucketName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "rpo", "DEFAULT"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_dualLocation_rpo(bucketName,"ASYNC_TURBO"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "rpo", "ASYNC_TURBO"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: 
testAccStorageBucket_dualLocation_rpo(bucketName,"DEFAULT"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "rpo", "DEFAULT"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_multiLocation_rpo(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_basic(bucketName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "rpo", "DEFAULT"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_multiLocation_rpo(bucketName,"DEFAULT"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "rpo", "DEFAULT"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + func TestAccStorageBucket_customAttributes(t *testing.T) { t.Parallel() @@ -1491,6 +1583,31 @@ resource "google_storage_bucket" "bucket" { `, bucketName) } +func testAccStorageBucket_dualLocation_rpo(bucketName string,rpo string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "ASIA" + force_destroy = true + custom_placement_config { + data_locations = ["ASIA-EAST1", "ASIA-SOUTHEAST1"] + } + rpo = "%s" +} +`, bucketName,rpo) +} + +func 
testAccStorageBucket_multiLocation_rpo(bucketName string,rpo string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "ASIA" + force_destroy = true + rpo = "%s" +} +`, bucketName,rpo) +} + func testAccStorageBucket_customAttributes(bucketName string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { diff --git a/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown b/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown index 0131446fffa8..2389ac04bf55 100644 --- a/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/storage_bucket.html.markdown @@ -125,6 +125,8 @@ The following arguments are supported: * `requester_pays` - (Optional, Default: false) Enables [Requester Pays](https://cloud.google.com/storage/docs/requester-pays) on a storage bucket. +* `rpo` - (Optional) The recovery point objective for cross-region replication of the bucket. Applicable only for dual and multi-region buckets. `"DEFAULT"` sets default replication. `"ASYNC_TURBO"` value enables turbo replication, valid for dual-region buckets only. See [Turbo Replication](https://cloud.google.com/storage/docs/managing-turbo-replication) for more information. If rpo is not specified at bucket creation, it defaults to `"DEFAULT"` for dual and multi-region buckets. **NOTE** If used with single-region bucket, It will throw an error. + * `uniform_bucket_level_access` - (Optional, Default: false) Enables [Uniform bucket-level access](https://cloud.google.com/storage/docs/uniform-bucket-level-access) access to a bucket. * `public_access_prevention` - (Optional) Prevents public access to a bucket. Acceptable values are "inherited" or "enforced". If "inherited", the bucket uses [public access prevention](https://cloud.google.com/storage/docs/public-access-prevention). 
only if the bucket is subject to the public access prevention organization policy constraint. Defaults to "inherited". From a1b033aaea49e933ea13528e03b9ab2a46342770 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Fri, 8 Dec 2023 12:46:21 -0800 Subject: [PATCH 41/44] Remove the code related to api.yaml (#9596) --- mmv1/compiler.rb | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/mmv1/compiler.rb b/mmv1/compiler.rb index 1f2cdc5f82c3..c7c73c5d91e2 100755 --- a/mmv1/compiler.rb +++ b/mmv1/compiler.rb @@ -152,31 +152,16 @@ product_override_path = File.join(override_dir, product_name, 'product.yaml') if override_dir product_yaml_path = File.join(product_name, 'product.yaml') - api_override_path = '' - api_override_path = File.join(override_dir, product_name, 'api.yaml') if override_dir - api_yaml_path = File.join(product_name, 'api.yaml') - provider_override_path = '' provider_override_path = File.join(override_dir, product_name, "#{provider_name}.yaml") \ if override_dir provider_yaml_path = File.join(product_name, "#{provider_name}.yaml") - unless File.exist?(product_yaml_path) || File.exist?(product_override_path) \ - || File.exist?(api_yaml_path) || File.exist?(api_override_path) + unless File.exist?(product_yaml_path) || File.exist?(product_override_path) raise "#{product_name} does not contain an api.yaml or product.yaml file" end - if File.exist?(api_override_path) - result = if File.exist?(api_yaml_path) - YAML.load_file(api_yaml_path, permitted_classes: allowed_classes) \ - .merge(YAML.load_file(api_override_path, permitted_classes: allowed_classes)) - else - YAML.load_file(api_override_path, permitted_classes: allowed_classes) - end - product_yaml = result.to_yaml - elsif File.exist?(api_yaml_path) - product_yaml = File.read(api_yaml_path) - elsif File.exist?(product_override_path) + if File.exist?(product_override_path) result = if File.exist?(product_yaml_path) YAML.load_file(product_yaml_path, permitted_classes: 
allowed_classes) \ .merge(YAML.load_file(product_override_path, permitted_classes: allowed_classes)) From 2b5c4b2cdc0ac1764e38b5b985e4aec17d9d6401 Mon Sep 17 00:00:00 2001 From: Swamita Gupta <55314843+swamitagupta@users.noreply.github.com> Date: Sat, 9 Dec 2023 03:56:20 +0530 Subject: [PATCH 42/44] Promote Cluster to GA (#9550) --- mmv1/products/vmwareengine/Cluster.yaml | 3 - .../vmware_engine_cluster_basic.tf.erb | 9 +- .../vmware_engine_cluster_full.tf.erb | 9 +- .../provider/provider_mmv1_resources.go.erb | 2 - ...ata_source_google_vmwareengine_cluster.go} | 4 +- ... => resource_vmwareengine_cluster_test.go} | 85 +++++++------------ .../docs/d/vmwareengine_cluster.html.markdown | 4 - 7 files changed, 40 insertions(+), 76 deletions(-) rename mmv1/third_party/terraform/services/vmwareengine/{data_source_google_vmwareengine_cluster.go.erb => data_source_google_vmwareengine_cluster.go} (94%) rename mmv1/third_party/terraform/services/vmwareengine/{resource_vmwareengine_cluster_test.go.erb => resource_vmwareengine_cluster_test.go} (71%) diff --git a/mmv1/products/vmwareengine/Cluster.yaml b/mmv1/products/vmwareengine/Cluster.yaml index 06fc5d2e8a1b..e2fa5c8053c3 100644 --- a/mmv1/products/vmwareengine/Cluster.yaml +++ b/mmv1/products/vmwareengine/Cluster.yaml @@ -14,7 +14,6 @@ --- !ruby/object:Api::Resource name: "Cluster" -min_version: beta base_url: "{{parent}}/clusters" create_url: "{{parent}}/clusters?clusterId={{name}}" self_link: "{{parent}}/clusters/{{name}}" @@ -53,7 +52,6 @@ autogen_async: true examples: - !ruby/object:Provider::Terraform::Examples name: "vmware_engine_cluster_basic" - min_version: beta primary_resource_id: "vmw-engine-ext-cluster" skip_test: true # update tests will take care of create and update. PC and cluster creation is expensive and node reservation is required. 
vars: @@ -65,7 +63,6 @@ examples: region: :REGION - !ruby/object:Provider::Terraform::Examples name: "vmware_engine_cluster_full" - min_version: beta primary_resource_id: "vmw-ext-cluster" skip_test: true # update tests will take care of create and update. PC and cluster creation is expensive and node reservation is required. vars: diff --git a/mmv1/templates/terraform/examples/vmware_engine_cluster_basic.tf.erb b/mmv1/templates/terraform/examples/vmware_engine_cluster_basic.tf.erb index 05fc4467dc02..ef7152569e08 100644 --- a/mmv1/templates/terraform/examples/vmware_engine_cluster_basic.tf.erb +++ b/mmv1/templates/terraform/examples/vmware_engine_cluster_basic.tf.erb @@ -1,5 +1,4 @@ resource "google_vmwareengine_cluster" "<%= ctx[:primary_resource_id] %>" { - provider = google-beta name = "<%= ctx[:vars]['name'] %>" parent = google_vmwareengine_private_cloud.cluster-pc.id node_type_configs { @@ -9,7 +8,6 @@ resource "google_vmwareengine_cluster" "<%= ctx[:primary_resource_id] %>" { } resource "google_vmwareengine_private_cloud" "cluster-pc" { - provider = google-beta location = "<%= ctx[:test_env_vars]['region'] %>-a" name = "<%= ctx[:vars]['private_cloud_id'] %>" description = "Sample test PC." @@ -28,9 +26,8 @@ resource "google_vmwareengine_private_cloud" "cluster-pc" { } resource "google_vmwareengine_network" "cluster-nw" { - provider = google-beta - name = "<%= ctx[:test_env_vars]['region'] %>-default" - location = "<%= ctx[:test_env_vars]['region'] %>" - type = "LEGACY" + name = "<%= ctx[:vars]['network_id'] %>" + type = "STANDARD" + location = "global" description = "PC network description." 
} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/vmware_engine_cluster_full.tf.erb b/mmv1/templates/terraform/examples/vmware_engine_cluster_full.tf.erb index 43ec853462c1..ce2584a72549 100644 --- a/mmv1/templates/terraform/examples/vmware_engine_cluster_full.tf.erb +++ b/mmv1/templates/terraform/examples/vmware_engine_cluster_full.tf.erb @@ -1,5 +1,4 @@ resource "google_vmwareengine_cluster" "<%= ctx[:primary_resource_id] %>" { - provider = google-beta name = "<%= ctx[:vars]['name'] %>" parent = google_vmwareengine_private_cloud.cluster-pc.id node_type_configs { @@ -10,7 +9,6 @@ resource "google_vmwareengine_cluster" "<%= ctx[:primary_resource_id] %>" { } resource "google_vmwareengine_private_cloud" "cluster-pc" { - provider = google-beta location = "<%= ctx[:test_env_vars]['region'] %>-a" name = "<%= ctx[:vars]['private_cloud_id'] %>" description = "Sample test PC." @@ -30,9 +28,8 @@ resource "google_vmwareengine_private_cloud" "cluster-pc" { } resource "google_vmwareengine_network" "cluster-nw" { - provider = google-beta - name = "<%= ctx[:test_env_vars]['region'] %>-default" - location = "<%= ctx[:test_env_vars]['region'] %>" - type = "LEGACY" + name = "<%= ctx[:vars]['network_id'] %>" + type = "STANDARD" + location = "global" description = "PC network description." 
} \ No newline at end of file diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index a43b78ba436c..d0bfb7e99f0d 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -200,9 +200,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_vpc_access_connector": vpcaccess.DataSourceVPCAccessConnector(), "google_redis_instance": redis.DataSourceGoogleRedisInstance(), "google_vertex_ai_index": vertexai.DataSourceVertexAIIndex(), - <% unless version == 'ga' -%> "google_vmwareengine_cluster": vmwareengine.DataSourceVmwareengineCluster(), - <% end -%> "google_vmwareengine_external_address": vmwareengine.DataSourceVmwareengineExternalAddress(), "google_vmwareengine_network": vmwareengine.DataSourceVmwareengineNetwork(), "google_vmwareengine_network_peering": vmwareengine.DataSourceVmwareengineNetworkPeering(), diff --git a/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_cluster.go.erb b/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_cluster.go similarity index 94% rename from mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_cluster.go.erb rename to mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_cluster.go index cdeff513d0e0..fe11f1b7b67e 100644 --- a/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_cluster.go.erb +++ b/mmv1/third_party/terraform/services/vmwareengine/data_source_google_vmwareengine_cluster.go @@ -1,6 +1,5 @@ -<% autogen_exception -%> package vmwareengine -<% unless version == 'ga' -%> + import ( "fmt" @@ -38,4 +37,3 @@ func dataSourceVmwareengineClusterRead(d *schema.ResourceData, meta interface{}) } return nil } -<% end -%> \ No newline at end of file diff --git 
a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_cluster_test.go.erb b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_cluster_test.go similarity index 71% rename from mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_cluster_test.go.erb rename to mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_cluster_test.go index c128ec1ce24d..af97dbf89768 100644 --- a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_cluster_test.go.erb +++ b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_cluster_test.go @@ -1,11 +1,9 @@ -<% autogen_exception -%> package vmwareengine_test -<% unless version == 'ga' -%> import ( "fmt" - "testing" "strings" + "testing" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" @@ -17,10 +15,11 @@ import ( ) func TestAccVmwareengineCluster_vmwareEngineClusterUpdate(t *testing.T) { + acctest.SkipIfVcr(t) t.Parallel() context := map[string]interface{}{ - "region": "southamerica-west1", // using region with low node utilization. + "region": "southamerica-west1", // using region with low node utilization. 
"org_id": envvar.GetTestOrgFromEnv(t), "billing_account": envvar.GetTestBillingAccountFromEnv(t), "random_suffix": acctest.RandString(t, 10), @@ -28,14 +27,13 @@ func TestAccVmwareengineCluster_vmwareEngineClusterUpdate(t *testing.T) { acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), - CheckDestroy: testAccCheckVmwareengineClusterDestroyProducer(t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckVmwareengineClusterDestroyProducer(t), Steps: []resource.TestStep{ { Config: testVmwareEngineClusterConfig(context, 3), Check: resource.ComposeTestCheckFunc( acctest.CheckDataSourceStateMatchesResourceStateWithIgnores("data.google_vmwareengine_cluster.ds", "google_vmwareengine_cluster.vmw-engine-ext-cluster", map[string]struct{}{}), - acctest.CheckDataSourceStateMatchesResourceStateWithIgnores("data.google_vmwareengine_private_cloud.ds", "google_vmwareengine_private_cloud.cluster-pc", map[string]struct{}{}), ), }, { @@ -44,19 +42,19 @@ func TestAccVmwareengineCluster_vmwareEngineClusterUpdate(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"parent", "name"}, }, - { - Config: testVmwareEngineClusterConfig(context, 4), // expand the cluster - }, - { + { + Config: testVmwareEngineClusterConfig(context, 4), // expand the cluster + }, + { ResourceName: "google_vmwareengine_cluster.vmw-engine-ext-cluster", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"parent", "name"}, }, { - Config: testVmwareEngineClusterConfig(context, 3), // shrink the cluster. - }, - { + Config: testVmwareEngineClusterConfig(context, 3), // shrink the cluster. 
+ }, + { ResourceName: "google_vmwareengine_cluster.vmw-engine-ext-cluster", ImportState: true, ImportStateVerify: true, @@ -67,66 +65,51 @@ func TestAccVmwareengineCluster_vmwareEngineClusterUpdate(t *testing.T) { } func testVmwareEngineClusterConfig(context map[string]interface{}, nodeCount int) string { - context["node_count"] = nodeCount; + context["node_count"] = nodeCount return acctest.Nprintf(` -resource "google_vmwareengine_cluster" "vmw-engine-ext-cluster" { - provider = google-beta - name = "tf-test-ext-cluster%{random_suffix}" - parent = google_vmwareengine_private_cloud.cluster-pc.id - node_type_configs { - node_type_id = "standard-72" - node_count = %{node_count} - custom_core_count = 32 - } +resource "google_vmwareengine_network" "cluster-nw" { + name = "tf-test-cluster-nw%{random_suffix}" + location = "global" + type = "STANDARD" + description = "PC network description." } resource "google_vmwareengine_private_cloud" "cluster-pc" { - provider = google-beta - location = "%{region}-a" - name = "tf-test-sample-pc%{random_suffix}" + location = "%{region}-a" + name = "tf-test-cluster-pc%{random_suffix}" description = "Sample test PC." network_config { - management_cidr = "192.168.30.0/24" + management_cidr = "192.168.10.0/24" vmware_engine_network = google_vmwareengine_network.cluster-nw.id } management_cluster { - cluster_id = "tf-test-sample-mgmt-cluster%{random_suffix}" + cluster_id = "tf-test-mgmt-cluster%{random_suffix}" node_type_configs { node_type_id = "standard-72" node_count = 3 - custom_core_count = 32 } } } -resource "google_vmwareengine_network" "cluster-nw" { - provider = google-beta - name = "%{region}-default" - location = "%{region}" - type = "LEGACY" - description = "PC network description." 
+resource "google_vmwareengine_cluster" "vmw-engine-ext-cluster" { + name = "tf-test-ext-cluster%{random_suffix}" + parent = google_vmwareengine_private_cloud.cluster-pc.id + node_type_configs { + node_type_id = "standard-72" + node_count = %{node_count} + custom_core_count = 32 + } } data "google_vmwareengine_cluster" ds { name = "tf-test-ext-cluster%{random_suffix}" - provider = google-beta - parent = google_vmwareengine_private_cloud.cluster-pc.id - depends_on = [ - google_vmwareengine_cluster.vmw-engine-ext-cluster, + parent = google_vmwareengine_private_cloud.cluster-pc.id + depends_on = [ + google_vmwareengine_cluster.vmw-engine-ext-cluster, ] } - -data "google_vmwareengine_private_cloud" ds { - location = "%{region}-a" - provider = google-beta - name = "tf-test-sample-pc%{random_suffix}" - depends_on = [ - google_vmwareengine_private_cloud.cluster-pc, - ] -} - `, context) } @@ -168,5 +151,3 @@ func testAccCheckVmwareengineClusterDestroyProducer(t *testing.T) func(s *terraf return nil } } - -<% end -%> \ No newline at end of file diff --git a/mmv1/third_party/terraform/website/docs/d/vmwareengine_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/d/vmwareengine_cluster.html.markdown index 6da3627be83e..4d591081edf1 100644 --- a/mmv1/third_party/terraform/website/docs/d/vmwareengine_cluster.html.markdown +++ b/mmv1/third_party/terraform/website/docs/d/vmwareengine_cluster.html.markdown @@ -8,9 +8,6 @@ description: |- Use this data source to get details about a cluster resource. -~> **Warning:** This data source is in beta, and should be used with the terraform-provider-google-beta provider. -See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. 
- To get more information about private cloud cluster, see: * [API documentation](https://cloud.google.com/vmware-engine/docs/reference/rest/v1/projects.locations.privateClouds.clusters) @@ -18,7 +15,6 @@ To get more information about private cloud cluster, see: ```hcl data "google_vmwareengine_cluster" "my_cluster" { - provider = google-beta name = "my-cluster" parent = "project/locations/us-west1-a/privateClouds/my-cloud" } From 42281d39a26971a0bef4a8c495e22f6564a99bdf Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Fri, 8 Dec 2023 15:08:56 -0800 Subject: [PATCH 43/44] Fixed TestAccLoggingProjectSink_updatePreservesCustomWriter (#9604) * Re-enabled test * Added sleep to allow IAM propagation * Added time provider to external providers * gofmt --- .../resource_logging_project_sink_test.go | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/mmv1/third_party/terraform/services/logging/resource_logging_project_sink_test.go b/mmv1/third_party/terraform/services/logging/resource_logging_project_sink_test.go index d89af08acdd1..ea6c1e83348b 100644 --- a/mmv1/third_party/terraform/services/logging/resource_logging_project_sink_test.go +++ b/mmv1/third_party/terraform/services/logging/resource_logging_project_sink_test.go @@ -165,8 +165,6 @@ func TestAccLoggingProjectSink_updatePreservesUniqueWriter(t *testing.T) { } func TestAccLoggingProjectSink_updatePreservesCustomWriter(t *testing.T) { - // Investigating failure reason, skipping in VCR for now - acctest.SkipIfVcr(t) t.Parallel() sinkName := "tf-test-sink-" + acctest.RandString(t, 10) @@ -186,7 +184,10 @@ func TestAccLoggingProjectSink_updatePreservesCustomWriter(t *testing.T) { acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckLoggingProjectSinkDestroyProducer(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + 
CheckDestroy: testAccCheckLoggingProjectSinkDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccLoggingProjectSink_customWriter(org, billingId, project, sinkName, account), @@ -529,6 +530,11 @@ resource "google_service_account_iam_member" "loggingsa-customsa-binding" { member = "serviceAccount:service-${local.project_number}@gcp-sa-logging.iam.gserviceaccount.com" } +resource "time_sleep" "wait_60_seconds" { + depends_on = [google_service_account_iam_member.loggingsa-customsa-binding] + create_duration = "60s" +} + resource "google_logging_project_sink" "custom_writer" { name = "%s" destination = "logging.googleapis.com/projects/${google_project.destination-project.project_id}/locations/us-central1/buckets/shared-bucket" @@ -538,8 +544,8 @@ resource "google_logging_project_sink" "custom_writer" { custom_writer_identity = "serviceAccount:${google_service_account.test-account1.email}" depends_on = [ - google_logging_project_bucket_config.destination-bucket, - google_service_account_iam_member.loggingsa-customsa-binding, + google_logging_project_bucket_config.destination-bucket, + time_sleep.wait_60_seconds, ] } `, project, project, org, billingId, serviceAccount, envvar.GetTestProjectFromEnv(), name, envvar.GetTestProjectFromEnv()) From 0d4f118b2a15029d7099b469a4c6da514df4e7c9 Mon Sep 17 00:00:00 2001 From: Scott Suarez Date: Fri, 8 Dec 2023 16:01:50 -0800 Subject: [PATCH 44/44] Build and test provider off main/feature (#9566) --- .github/workflows/build-downstream.yml | 2 +- .github/workflows/downstreams.yml | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-downstream.yml b/.github/workflows/build-downstream.yml index 32fe3e526d37..f8446388ee65 100644 --- a/.github/workflows/build-downstream.yml +++ b/.github/workflows/build-downstream.yml @@ -17,7 +17,7 @@ jobs: generate-repository: runs-on: ubuntu-22.04 env: - BASE_BRANCH: ${{ github.event.pull_request.base.ref || github.ref }} + BASE_BRANCH: ${{ 
github.event.pull_request.base.ref || github.ref_name }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - name: Checkout Repository diff --git a/.github/workflows/downstreams.yml b/.github/workflows/downstreams.yml index 1c0114fbe324..98273ca7e18e 100644 --- a/.github/workflows/downstreams.yml +++ b/.github/workflows/downstreams.yml @@ -4,7 +4,8 @@ permissions: read-all on: push: branches: - - scott-test-* + - main + - 'FEATURE-BRANCH-*' concurrency: group: ${{ github.event_name == 'pull_request' && format('pr-{0}', github.event.pull_request.number) || format('commit-{0}', github.sha) }}